import com.alibaba.otter.canal.client.CanalConnector;
import com.alibaba.otter.canal.client.CanalConnectors;
import com.alibaba.otter.canal.common.utils.AddressUtils;
import com.alibaba.otter.canal.protocol.Message;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.net.InetSocketAddress;
import java.util.Properties;
public class CanalKafkaExample {
/**
 * Entry point: connects to a local Canal server instance, subscribes to
 * every schema/table, and polls change batches in an endless loop,
 * delegating non-empty batches to {@code dataHandle}.
 *
 * <p>Runs until the process is killed or the thread is interrupted;
 * the connection is always released in the {@code finally} block.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    // Connect to the Canal server ("example" instance on port 11111,
    // empty username/password).
    CanalConnector connector = CanalConnectors.newSingleConnector(
            new InetSocketAddress(AddressUtils.getHostIp(), 11111),
            "example", "", "");
    connector.connect();
    // Subscribe to all databases and all tables.
    connector.subscribe(".*\\..*");
    // Discard batches fetched but never acknowledged by a previous session.
    connector.rollback();
    try {
        while (true) {
            // Fetch up to 100 entries without auto-acknowledging.
            Message message = connector.getWithoutAck(100);
            long batchId = message.getId();
            if (batchId == -1 || message.getEntries().isEmpty()) {
                // No data available — back off briefly before polling again.
                Thread.sleep(1000);
            } else {
                dataHandle(message, connector);
            }
            // Acknowledge the batch so Canal can advance its cursor.
            // NOTE(review): this acks even the empty -1 batch, matching the
            // upstream Canal client sample — confirm against the client docs.
            connector.ack(batchId);
        }
    } catch (InterruptedException e) {
        // Restore the interrupt flag instead of swallowing the interruption,
        // so any enclosing framework can observe the shutdown request.
        Thread.currentThread().interrupt();
    } catch (Exception e) {
        e.printStackTrace();
    } finally {
        // Always release the connection, even on failure.
        connector.disconnect();
    }
}
private static void dataHandle(Message message, CanalConnector connector) {
for (Message.Entry entry : message.getEntries()) {
if (entry.getEntryType() == Message.EntryType.ROWDATA) {
// 获取数据库名和表名
String databaseName = entry.getHeader().getSchemaName();
String tableName = entry.getHeader().getTableName();
// 遍历每一行数据
for (Message.RowChange.Row row : entry.getRowChanges()) {
// 根据不同的操作类型进行不同的处理
switch (row.getAction()) {
case INSERT:
case UPDATE:
case DELETE:
// 发送到Kafka
sendToKafka(databaseName, tableName, row);
break;
default:
break;
// NOTE(review): the remainder of dataHandle (and the closing braces of the
// method and class) is missing from this copy; the trailing text "评论已关闭"
// ("comments are closed") is a web-scraping artifact, not code.