Commit 55de70b

oracle test
commit 7ced72b6426e9723f91933ef0144fdda4b9c072d
Author: Hang Ruan <[email protected]>
Date: Thu Jun 8 17:01:39 2023 +0800
[mysql-cdc] Supports MYSQL_TYPE_TYPED_ARRAY column type when parsing the table map event
This closes apache#2001

commit a847655ed68c4d5874ced69006686362f23140e7
Author: Leonard Xu <[email protected]>
Date: Thu Jun 8 15:33:54 2023 +0800
[mysql-cdc][hotfix] Optimize the error msg when binlog expire in source

commit 32f77ef0075dfd2b522df07809ab2aea8ae6de54
Author: Leonard Xu <[email protected]>
Date: Wed Jun 7 17:49:41 2023 +0800
[mysql-cdc] Optimize the error msg when binlog expire or server id conflict
This closes apache#2010.

commit da5cbf54e6b219956088bb99cec44d93f1011ff7
Author: He Wang <[email protected]>
Date: Thu Jun 8 13:50:24 2023 +0800
[oceanbase] add jdbc options and support oracle mode (apache#1854)

commit 0e221382601e352215091d9cd1cb59fa2175063b
Author: Leonard Xu <[email protected]>
Date: Wed Jun 7 17:34:57 2023 +0800
[mysql-cdc] Add handler for catching async exceptions in snapshot reading executor
This closes apache#2016.

commit 15e5e4d34dc05edcbdcfe15ff906213352a2cd02
Author: Hang Ruan <[email protected]>
Date: Wed Jun 7 17:26:59 2023 +0800
[hotfix][mysql-cdc] Use session timezone instead of local time zone as the default server time zone
This closes apache#2015.

commit 13235e2f2dca73320988a430b765f1fafa8e8f26
Author: Hang Ruan <[email protected]>
Date: Wed Jun 7 17:22:07 2023 +0800
[mysql] Fix NullPointerException when database name or table name contains dot
This closes apache#2006.

commit 6aa0fc4de353473cb7cda844a432e3dd5ddf4fea
Author: Hang Ruan <[email protected]>
Date: Thu Jan 12 02:18:52 2023 +0000
[mysql] Fix NullPointerException caused by mysql ignores the capitalization when splitting chunks

commit e9fed740f55f7f1e675e5fe83f7e04efb5e3f866
Author: ehui <[email protected]>
Date: Tue Jun 6 13:06:03 2023 +0800
[sqlserver] Fix old change data that will be captured when the latest mode starts (apache#2176)

commit d2ffa0902038e9aec0e2b92fa4e82a066d1b123f
Author: Tyrantlucifer <[email protected]>
Date: Mon Jun 5 23:15:33 2023 +0800
[core] support print configuration options for connectors (apache#2099)

commit a00abf88178feb6a2c699129e1f6aefb005960c7
Author: He Wang <[email protected]>
Date: Mon Jun 5 16:40:54 2023 +0800
[oceanbase] support libobcdc 4.x and fix restore timestamp config (apache#2161)

commit 061602d
Author: ehui <[email protected]>
Date: Mon Jun 5 23:49:15 2023 +0800
oralce

commit 8dc46cc
Author: ehui <[email protected]>
Date: Mon Jun 5 00:18:50 2023 +0800
[oracle]
1 parent bb60bfe commit 55de70b

File tree

5 files changed, +374 -5 lines changed

flink-cdc-base/src/main/java/com/ververica/cdc/connectors/base/source/reader/external/IncrementalSourceScanFetcher.java

Lines changed: 6 additions & 0 deletions

@@ -16,6 +16,7 @@

package com.ververica.cdc.connectors.base.source.reader.external;

+import org.apache.flink.annotation.VisibleForTesting;
import org.apache.flink.util.FlinkRuntimeException;

import org.apache.flink.shaded.guava30.com.google.common.util.concurrent.ThreadFactoryBuilder;

@@ -202,6 +203,11 @@ public void close() {
        }
    }

+    @VisibleForTesting
+    public ExecutorService getExecutorService() {
+        return executorService;
+    }
+
    private void assertLowWatermark(SourceRecord lowWatermark) {
        checkState(
                isLowWatermarkEvent(lowWatermark),
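The only production change in this file is exposing the fetcher's executor for tests. For context, a standalone sketch (plain JDK code, not connector code, with an invented class name) of the property the new test later asserts through getExecutorService(): isTerminated() only becomes true once the pool has been shut down and all submitted work has finished, so checking it after close() verifies the fetcher's worker thread is really gone.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Standalone illustration only; IncrementalSourceScanFetcher is not involved here.
public class ExecutorShutdownSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.submit(() -> System.out.println("simulated split read"));

        // Not yet terminated: shutdown() has not been requested.
        System.out.println(executor.isTerminated()); // false

        executor.shutdown();                          // what close() is expected to trigger
        executor.awaitTermination(10, TimeUnit.SECONDS);

        // Terminated: shutdown was requested and all tasks completed.
        System.out.println(executor.isTerminated()); // true
    }
}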

flink-connector-oracle-cdc/src/main/java/com/ververica/cdc/connectors/oracle/source/reader/fetch/OracleScanFetchTask.java

Lines changed: 12 additions & 5 deletions

@@ -103,6 +103,7 @@ public void execute(Context context) throws Exception {
                        sourceFetchContext.getDatabaseSchema(),
                        sourceFetchContext.getConnection(),
                        sourceFetchContext.getDispatcher(),
+                        sourceFetchContext.getSnapshotReceiver(),
                        split);
        SnapshotSplitChangeEventSourceContext changeEventSourceContext =
                new SnapshotSplitChangeEventSourceContext();

@@ -128,11 +129,16 @@ public void execute(Context context) throws Exception {
        }
        // execute redoLog read task
        if (snapshotResult.isCompletedOrSkipped()) {
+            final LogMinerOracleOffsetContextLoader loader =
+                    new LogMinerOracleOffsetContextLoader(
+                            sourceFetchContext.getDbzConnectorConfig());
+            final OracleOffsetContext streamOffsetContext =
+                    loader.load(backfillBinlogSplit.getStartingOffset().getOffset());
+
            final RedoLogSplitReadTask backfillBinlogReadTask =
                    createBackfillRedoLogReadTask(backfillBinlogSplit, sourceFetchContext);
            backfillBinlogReadTask.execute(
-                    new SnapshotBinlogSplitChangeEventSourceContext(),
-                    sourceFetchContext.getOffsetContext());
+                    new SnapshotBinlogSplitChangeEventSourceContext(), streamOffsetContext);
        } else {
            taskRunning = false;
            throw new IllegalStateException(

@@ -210,6 +216,7 @@ public static class OracleSnapshotSplitReadTask extends AbstractSnapshotChangeEv
        private final SnapshotSplit snapshotSplit;
        private final OracleOffsetContext offsetContext;
        private final SnapshotProgressListener snapshotProgressListener;
+        private final EventDispatcher.SnapshotReceiver snapshotReceiver;

        public OracleSnapshotSplitReadTask(
                OracleConnectorConfig connectorConfig,

@@ -218,6 +225,7 @@ public OracleSnapshotSplitReadTask(
                OracleDatabaseSchema databaseSchema,
                OracleConnection jdbcConnection,
                JdbcSourceEventDispatcher dispatcher,
+                EventDispatcher.SnapshotReceiver snapshotReceiver,
                SnapshotSplit snapshotSplit) {
            super(connectorConfig, snapshotProgressListener);
            this.offsetContext = previousOffset;

@@ -228,6 +236,7 @@ public OracleSnapshotSplitReadTask(
            this.clock = Clock.SYSTEM;
            this.snapshotSplit = snapshotSplit;
            this.snapshotProgressListener = snapshotProgressListener;
+            this.snapshotReceiver = snapshotReceiver;
        }

        @Override

@@ -280,7 +289,7 @@ protected SnapshotResult doExecute(
                    "Snapshot step 3 - Determining high watermark {} for split {}",
                    highWatermark,
                    snapshotSplit);
-            ((SnapshotSplitChangeEventSourceContext) (context)).setHighWatermark(lowWatermark);
+            ((SnapshotSplitChangeEventSourceContext) (context)).setHighWatermark(highWatermark);
            dispatcher.dispatchWatermarkEvent(
                    offsetContext.getPartition(), snapshotSplit, highWatermark, WatermarkKind.HIGH);
            return SnapshotResult.completed(ctx.offset);

@@ -309,8 +318,6 @@ private void createDataEvents(
            RelationalSnapshotChangeEventSource.RelationalSnapshotContext snapshotContext,
            TableId tableId)
            throws Exception {
-        EventDispatcher.SnapshotReceiver snapshotReceiver =
-                dispatcher.getSnapshotChangeEventReceiver();
        LOG.debug("Snapshotting table {}", tableId);
        createDataEventsForTable(
                snapshotContext, snapshotReceiver, databaseSchema.tableFor(tableId));
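Two behavioural fixes sit in this file: the high watermark is now set from highWatermark instead of lowWatermark, and the redo-log backfill is driven by an offset context loaded from the backfill split's own starting offset (via LogMinerOracleOffsetContextLoader) rather than by reusing the snapshot's offset context. To make the intent of the backfill easier to follow, here is a self-contained sketch of the watermark/backfill merge idea; the types, class name, and values below are invented for the example and are not the connector's actual code.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: changes captured between the low and high watermark must be
// replayed on top of the chunk scan so concurrent writes are neither lost nor duplicated.
public class WatermarkBackfillSketch {

    /** A simplified change event: log position, row key, new value (null = delete). */
    static final class Change {
        final long position;
        final int key;
        final String value;

        Change(long position, int key, String value) {
            this.position = position;
            this.key = key;
            this.value = value;
        }
    }

    static Map<Integer, String> mergeSnapshotWithBackfill(
            Map<Integer, String> chunkScan,   // rows read between the two watermarks
            List<Change> redoLog,             // log changes ordered by position
            long lowWatermark,
            long highWatermark) {
        Map<Integer, String> merged = new LinkedHashMap<>(chunkScan);
        for (Change change : redoLog) {
            // Only changes inside the (low, high] window can race with the scan.
            if (change.position <= lowWatermark || change.position > highWatermark) {
                continue;
            }
            if (change.value == null) {
                merged.remove(change.key);            // delete observed during the scan
            } else {
                merged.put(change.key, change.value); // insert/update wins over the scan
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        Map<Integer, String> scanned = new LinkedHashMap<>();
        scanned.put(101, "Shanghai");
        scanned.put(103, "Hangzhou"); // stale: row 103 changed again mid-scan

        List<Change> redoLog = new ArrayList<>();
        redoLog.add(new Change(5, 103, "Shanghai")); // update racing with the scan
        redoLog.add(new Change(9, 102, "Shanghai")); // insert racing with the scan

        // Prints {101=Shanghai, 103=Shanghai, 102=Shanghai}
        System.out.println(mergeSnapshotWithBackfill(scanned, redoLog, 3, 10));
    }
}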

flink-connector-oracle-cdc/src/main/java/com/ververica/cdc/connectors/oracle/source/reader/fetch/OracleSourceFetchTaskContext.java

Lines changed: 8 additions & 0 deletions

@@ -43,6 +43,7 @@
import io.debezium.data.Envelope;
import io.debezium.pipeline.DataChangeEvent;
import io.debezium.pipeline.ErrorHandler;
+import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.metrics.SnapshotChangeEventSourceMetrics;
import io.debezium.pipeline.source.spi.EventMetadataProvider;
import io.debezium.pipeline.spi.OffsetContext;

@@ -77,6 +78,7 @@ public class OracleSourceFetchTaskContext extends JdbcSourceFetchTaskContext {
    private JdbcSourceEventDispatcher dispatcher;
    private ChangeEventQueue<DataChangeEvent> queue;
    private OracleErrorHandler errorHandler;
+    private EventDispatcher.SnapshotReceiver snapshotReceiver;

    public OracleSourceFetchTaskContext(
            JdbcSourceConfig sourceConfig,

@@ -133,6 +135,8 @@ public void configure(SourceSplitBase sourceSplitBase) {
                        metadataProvider,
                        schemaNameAdjuster);

+        this.snapshotReceiver = dispatcher.getSnapshotChangeEventReceiver();
+
        final OracleChangeEventSourceMetricsFactory changeEventSourceMetricsFactory =
                new OracleChangeEventSourceMetricsFactory(
                        new OracleStreamingChangeEventSourceMetrics(

@@ -194,6 +198,10 @@ public JdbcSourceEventDispatcher getDispatcher() {
        return dispatcher;
    }

+    public EventDispatcher.SnapshotReceiver getSnapshotReceiver() {
+        return snapshotReceiver;
+    }
+
    @Override
    public ChangeEventQueue<DataChangeEvent> getQueue() {
        return queue;
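Creating the SnapshotReceiver once in configure() and exposing it through the context is what gives the new test below its hook: a test subclass can return a wrapping receiver that performs extra work after the snapshot phase completes. A minimal, self-contained sketch of that wrapping pattern follows; the Receiver interface and class name here are stand-ins, not Debezium's EventDispatcher.SnapshotReceiver.

// Illustrative decorator over a stand-in interface; MakeChangeEventTaskContext in the
// new test applies the same idea to Debezium's EventDispatcher.SnapshotReceiver.
public class ReceiverHookSketch {

    interface Receiver {
        void changeRecord(String record) throws InterruptedException;

        void completeSnapshot() throws InterruptedException;
    }

    static Receiver withPostSnapshotHook(Receiver delegate, Runnable hook) {
        return new Receiver() {
            @Override
            public void changeRecord(String record) throws InterruptedException {
                delegate.changeRecord(record); // records pass straight through
            }

            @Override
            public void completeSnapshot() throws InterruptedException {
                delegate.completeSnapshot();
                hook.run(); // e.g. issue DML that the backfill must observe
            }
        };
    }
}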
flink-connector-oracle-cdc/src/test/java/com/ververica/cdc/connectors/oracle/source/read/fetch/OracleScanFetchTaskTest.java

Lines changed: 248 additions & 0 deletions
@@ -0,0 +1,248 @@
/*
 * Copyright 2022 Ververica Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.ververica.cdc.connectors.oracle.source.read.fetch;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

import com.ververica.cdc.connectors.base.config.JdbcSourceConfig;
import com.ververica.cdc.connectors.base.dialect.JdbcDataSourceDialect;
import com.ververica.cdc.connectors.base.source.assigner.splitter.ChunkSplitter;
import com.ververica.cdc.connectors.base.source.meta.split.SnapshotSplit;
import com.ververica.cdc.connectors.base.source.meta.split.SourceRecords;
import com.ververica.cdc.connectors.base.source.reader.external.IncrementalSourceScanFetcher;
import com.ververica.cdc.connectors.oracle.source.OracleDialect;
import com.ververica.cdc.connectors.oracle.source.OracleSourceTestBase;
import com.ververica.cdc.connectors.oracle.source.config.OracleSourceConfig;
import com.ververica.cdc.connectors.oracle.source.config.OracleSourceConfigFactory;
import com.ververica.cdc.connectors.oracle.source.reader.fetch.OracleScanFetchTask;
import com.ververica.cdc.connectors.oracle.source.reader.fetch.OracleSourceFetchTaskContext;
import com.ververica.cdc.connectors.oracle.utils.OracleTestUtils;
import com.ververica.cdc.connectors.oracle.utils.RecordsFormatter;
import io.debezium.connector.oracle.OracleConnection;
import io.debezium.data.Envelope;
import io.debezium.jdbc.JdbcConnection;
import io.debezium.pipeline.EventDispatcher;
import io.debezium.pipeline.spi.OffsetContext;
import io.debezium.relational.TableId;
import io.debezium.schema.DataCollectionSchema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.header.ConnectHeaders;
import org.apache.kafka.connect.source.SourceRecord;
import org.junit.Test;

import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.function.Supplier;
import java.util.stream.Collectors;

import static com.ververica.cdc.connectors.oracle.source.utils.OracleConnectionUtils.createOracleConnection;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;

/** Tests for {@link OracleScanFetchTask}. */
public class OracleScanFetchTaskTest extends OracleSourceTestBase {

    @Test
    public void testChangingDataInSnapshotScan() throws Exception {
        OracleTestUtils.createAndInitialize(OracleTestUtils.ORACLE_CONTAINER, "customer.sql");

        String tableName = ORACLE_SCHEMA + ".CUSTOMERS";

        OracleSourceConfigFactory sourceConfigFactory =
                getConfigFactory(new String[] {tableName}, 10);
        OracleSourceConfig sourceConfig = sourceConfigFactory.create(0);
        OracleDialect oracleDialect = new OracleDialect(sourceConfigFactory);

        String[] changingDataSql =
                new String[] {
                    "UPDATE " + tableName + " SET address = 'Hangzhou' where id = 103",
                    "DELETE FROM " + tableName + " where id = 102",
                    "INSERT INTO " + tableName + " VALUES(102, 'user_2','Shanghai','123567891234')",
                    "UPDATE " + tableName + " SET address = 'Shanghai' where id = 103",
                    "UPDATE " + tableName + " SET address = 'Hangzhou' where id = 110",
                    "UPDATE " + tableName + " SET address = 'Hangzhou' where id = 111",
                };

        MakeChangeEventTaskContext makeChangeEventTaskContext =
                new MakeChangeEventTaskContext(
                        sourceConfig,
                        oracleDialect,
                        createOracleConnection(
                                sourceConfig.getDbzConnectorConfig().getJdbcConfig()),
                        () -> executeSql(sourceConfig, changingDataSql));

        final DataType dataType =
                DataTypes.ROW(
                        DataTypes.FIELD("ID", DataTypes.BIGINT()),
                        DataTypes.FIELD("NAME", DataTypes.STRING()),
                        DataTypes.FIELD("ADDRESS", DataTypes.STRING()),
                        DataTypes.FIELD("PHONE_NUMBER", DataTypes.STRING()));

        List<SnapshotSplit> snapshotSplits = getSnapshotSplits(sourceConfig, oracleDialect);

        String[] expected =
                new String[] {
                    "+I[101, user_1, Shanghai, 123567891234]",
                    "+I[102, user_2, Shanghai, 123567891234]",
                    "+I[103, user_3, Shanghai, 123567891234]",
                    "+I[109, user_4, Shanghai, 123567891234]",
                    "+I[110, user_5, Hangzhou, 123567891234]",
                    "+I[111, user_6, Hangzhou, 123567891234]",
                    "+I[118, user_7, Shanghai, 123567891234]",
                    "+I[121, user_8, Shanghai, 123567891234]",
                    "+I[123, user_9, Shanghai, 123567891234]",
                };

        List<String> actual =
                readTableSnapshotSplits(snapshotSplits, makeChangeEventTaskContext, 1, dataType);
        assertEqualsInAnyOrder(Arrays.asList(expected), actual);
    }

    private List<String> readTableSnapshotSplits(
            List<SnapshotSplit> snapshotSplits,
            OracleSourceFetchTaskContext taskContext,
            int scanSplitsNum,
            DataType dataType)
            throws Exception {
        IncrementalSourceScanFetcher sourceScanFetcher =
                new IncrementalSourceScanFetcher(taskContext, 0);

        List<SourceRecord> result = new ArrayList<>();
        for (int i = 0; i < scanSplitsNum; i++) {
            SnapshotSplit sqlSplit = snapshotSplits.get(i);
            if (sourceScanFetcher.isFinished()) {
                sourceScanFetcher.submitTask(
                        taskContext.getDataSourceDialect().createFetchTask(sqlSplit));
            }
            Iterator<SourceRecords> res;
            while ((res = sourceScanFetcher.pollSplitRecords()) != null) {
                while (res.hasNext()) {
                    SourceRecords sourceRecords = res.next();
                    result.addAll(sourceRecords.getSourceRecordList());
                }
            }
        }

        sourceScanFetcher.close();

        assertNotNull(sourceScanFetcher.getExecutorService());
        assertTrue(sourceScanFetcher.getExecutorService().isTerminated());

        return formatResult(result, dataType);
    }

    private List<String> formatResult(List<SourceRecord> records, DataType dataType) {
        final RecordsFormatter formatter = new RecordsFormatter(dataType);
        return formatter.format(records);
    }

    private List<SnapshotSplit> getSnapshotSplits(
            OracleSourceConfig sourceConfig, JdbcDataSourceDialect sourceDialect) {
        String databaseName = sourceConfig.getDatabaseList().get(0);
        List<TableId> tableIdList =
                sourceConfig.getTableList().stream()
                        .map(tableId -> TableId.parse(databaseName + "." + tableId))
                        .collect(Collectors.toList());
        final ChunkSplitter chunkSplitter = sourceDialect.createChunkSplitter(sourceConfig);

        List<SnapshotSplit> snapshotSplitList = new ArrayList<>();
        for (TableId table : tableIdList) {
            Collection<SnapshotSplit> snapshotSplits = chunkSplitter.generateSplits(table);
            snapshotSplitList.addAll(snapshotSplits);
        }
        return snapshotSplitList;
    }

    public static OracleSourceConfigFactory getConfigFactory(
            String[] captureTables, int splitSize) {
        Properties debeziumProperties = new Properties();
        debeziumProperties.setProperty("log.mining.strategy", "online_catalog");
        debeziumProperties.setProperty("log.mining.continuous.mine", "true");

        return (OracleSourceConfigFactory)
                new OracleSourceConfigFactory()
                        .hostname(ORACLE_CONTAINER.getHost())
                        .port(ORACLE_CONTAINER.getOraclePort())
                        .username(ORACLE_CONTAINER.getUsername())
                        .password(ORACLE_CONTAINER.getPassword())
                        .databaseList(ORACLE_DATABASE)
                        .tableList(captureTables)
                        .debeziumProperties(debeziumProperties)
                        .splitSize(splitSize);
    }

    private boolean executeSql(OracleSourceConfig sourceConfig, String[] sqlStatements) {
        JdbcConnection connection =
                createOracleConnection(sourceConfig.getDbzConnectorConfig().getJdbcConfig());
        try {
            connection.setAutoCommit(false);
            connection.execute(sqlStatements);
            connection.commit();
        } catch (SQLException e) {
            LOG.error("Failed to execute sql statements.", e);
            return false;
        }
        return true;
    }

    class MakeChangeEventTaskContext extends OracleSourceFetchTaskContext {

        private Supplier<Boolean> makeChangeEventFunction;

        public MakeChangeEventTaskContext(
                JdbcSourceConfig jdbcSourceConfig,
                OracleDialect oracleDialect,
                OracleConnection connection,
                Supplier<Boolean> makeChangeEventFunction) {
            super(jdbcSourceConfig, oracleDialect, connection);
            this.makeChangeEventFunction = makeChangeEventFunction;
        }

        @Override
        public EventDispatcher.SnapshotReceiver getSnapshotReceiver() {
            EventDispatcher.SnapshotReceiver snapshotReceiver = super.getSnapshotReceiver();
            return new EventDispatcher.SnapshotReceiver() {

                @Override
                public void changeRecord(
                        DataCollectionSchema schema,
                        Envelope.Operation operation,
                        Object key,
                        Struct value,
                        OffsetContext offset,
                        ConnectHeaders headers)
                        throws InterruptedException {
                    snapshotReceiver.changeRecord(schema, operation, key, value, offset, headers);
                }

                @Override
                public void completeSnapshot() throws InterruptedException {
                    snapshotReceiver.completeSnapshot();
                    // make change events
                    makeChangeEventFunction.get();
                    Thread.sleep(120 * 1000);
                }
            };
        }
    }
}
