// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

import org.apache.doris.regression.suite.ClusterOptions
import org.awaitility.Awaitility

import static java.util.concurrent.TimeUnit.SECONDS

// Regression test: a streaming MySQL CDC job with an explicit "max_interval"
// property must survive an FE restart and keep consuming new data.
// Before the fix, max_interval was not re-parsed from properties in the
// constructor after restart, so timeoutMs=0 made every task time out
// immediately and the job got stuck in PAUSED.
suite("test_streaming_mysql_job_restart_fe_with_props", "docker,mysql,external_docker,external_docker_mysql,nondatalake") {
    def jobName = "test_streaming_mysql_job_restart_fe_with_props"
    def options = new ClusterOptions()
    options.setFeNum(1)
    options.cloudMode = null

    docker(options) {
        def currentDb = (sql "select database()")[0][0]
        def table1 = "restart_props_user_info"
        def mysqlDb = "test_cdc_db"

        // Start from a clean slate in case a previous run left state behind.
        sql """DROP JOB IF EXISTS where jobname = '${jobName}'"""
        sql """drop table if exists ${currentDb}.${table1} force"""

        String enabled = context.config.otherConfigs.get("enableJdbcTest")
        if (enabled != null && enabled.equalsIgnoreCase("true")) {
            String mysql_port = context.config.otherConfigs.get("mysql_57_port")
            String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
            String s3_endpoint = getS3Endpoint()
            String bucket = getS3BucketName()
            String driver_url = "https://${bucket}.${s3_endpoint}/regression/jdbc_driver/mysql-connector-j-8.4.0.jar"

            // Seed the source MySQL table with two snapshot rows.
            connect("root", "123456", "jdbc:mysql://${externalEnvIp}:${mysql_port}") {
                sql """CREATE DATABASE IF NOT EXISTS ${mysqlDb}"""
                sql """DROP TABLE IF EXISTS ${mysqlDb}.${table1}"""
                sql """CREATE TABLE ${mysqlDb}.${table1} (
                        `name` varchar(200) NOT NULL,
                        `age` int DEFAULT NULL,
                        PRIMARY KEY (`name`)
                        ) ENGINE=InnoDB"""
                sql """INSERT INTO ${mysqlDb}.${table1} (name, age) VALUES ('A1', 1);"""
                sql """INSERT INTO ${mysqlDb}.${table1} (name, age) VALUES ('B1', 2);"""
            }

            // Create job with explicit max_interval property.
            // Before the fix, after FE restart the max_interval would not be parsed
            // from properties in the constructor, causing timeoutMs=0 and every task
            // to timeout immediately.
            sql """CREATE JOB ${jobName}
                PROPERTIES("max_interval" = "5")
                ON STREAMING
                FROM MYSQL (
                    "jdbc_url" = "jdbc:mysql://${externalEnvIp}:${mysql_port}",
                    "driver_url" = "${driver_url}",
                    "driver_class" = "com.mysql.cj.jdbc.Driver",
                    "user" = "root",
                    "password" = "123456",
                    "database" = "${mysqlDb}",
                    "include_tables" = "${table1}",
                    "offset" = "initial"
                )
                TO DATABASE ${currentDb} (
                    "table.create.properties.replication_num" = "1"
                )
                """

            // Wait for snapshot data to be loaded (at least 2 succeeded tasks).
            try {
                Awaitility.await().atMost(300, SECONDS)
                        .pollInterval(1, SECONDS).until(
                        {
                            def jobSucceedCount = sql """ select SucceedTaskCount from jobs("type"="insert") where Name = '${jobName}' and ExecuteType='STREAMING' """
                            log.info("jobSucceedCount: " + jobSucceedCount)
                            // Compare numerically: a lexicographic string compare
                            // ('2' <= value) would wrongly fail for values like "10".
                            jobSucceedCount.size() == 1 && (jobSucceedCount.get(0).get(0) as int) >= 2
                        }
                )
            } catch (Exception ex) {
                // Dump job/task state to aid debugging before rethrowing.
                def showjob = sql """select * from jobs("type"="insert") where Name='${jobName}'"""
                def showtask = sql """select * from tasks("type"="insert") where JobName='${jobName}'"""
                log.info("show job: " + showjob)
                log.info("show task: " + showtask)
                throw ex
            }

            def jobInfoBeforeRestart = sql """
                select loadStatistic, status, currentOffset from jobs("type"="insert") where Name='${jobName}'
            """
            log.info("jobInfoBeforeRestart: " + jobInfoBeforeRestart)
            def loadStatBefore = parseJson(jobInfoBeforeRestart.get(0).get(0))
            assert loadStatBefore.scannedRows == 2
            assert jobInfoBeforeRestart.get(0).get(1) == "RUNNING"

            // Restart FE and wait for it to come back before reconnecting.
            cluster.restartFrontends()
            sleep(60000)
            context.reconnectFe()

            // Insert new data after restart
            connect("root", "123456", "jdbc:mysql://${externalEnvIp}:${mysql_port}") {
                sql """INSERT INTO ${mysqlDb}.${table1} (name, age) VALUES ('C1', 3);"""
            }

            // Verify new data gets consumed after restart.
            // Before the fix, timeoutMs=0 caused every task to timeout immediately,
            // so the job would be stuck in PAUSED and never consume new data.
            try {
                Awaitility.await().atMost(120, SECONDS)
                        .pollInterval(2, SECONDS).until(
                        {
                            def loadStat = sql """ select loadStatistic from jobs("type"="insert") where Name = '${jobName}' """
                            def stat = parseJson(loadStat.get(0).get(0))
                            log.info("scannedRows after restart: " + stat.scannedRows)
                            stat.scannedRows == 3
                        }
                )
            } catch (Exception ex) {
                // Dump job/task state to aid debugging before rethrowing.
                def showjob = sql """select * from jobs("type"="insert") where Name='${jobName}'"""
                def showtask = sql """select * from tasks("type"="insert") where JobName='${jobName}'"""
                log.info("show job after restart insert: " + showjob)
                log.info("show task after restart insert: " + showtask)
                throw ex
            }

            // Final check: all three rows (2 snapshot + 1 post-restart) landed in Doris.
            def result = sql """select * from ${currentDb}.${table1} order by name"""
            log.info("final result: " + result)
            assert result.size() == 3

            sql """DROP JOB IF EXISTS where jobname = '${jobName}'"""
        }
    }
}