Prerequisites:

1. MySQL has binlog enabled. In my.ini under the MySQL installation directory, add the following under the [mysqld] section:

```ini
log-bin=mysql-bin   # enable binlog
binlog-format=ROW   # use ROW mode
server_id=1         # required for MySQL replication; must not clash with canal's slaveId
```

A ready-to-use reference project is available on Gitee. Reference address:
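If you want to confirm that these settings actually took effect, a quick check like the sketch below works. It is not part of the referenced project: the class name is illustrative, and the host, database and credentials simply mirror the values used later in the connector configuration, so adapt them to your environment.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Hypothetical helper: verifies the binlog settings before starting the connector.
public class BinlogCheck {

    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://10.1.1.1:3306/md_test", "canal", "canal");
             Statement st = conn.createStatement();
             ResultSet rs = st.executeQuery(
                     "SHOW VARIABLES WHERE Variable_name IN ('log_bin', 'binlog_format', 'server_id')")) {
            while (rs.next()) {
                // expected: log_bin = ON, binlog_format = ROW, server_id = 1
                System.out.println(rs.getString(1) + " = " + rs.getString(2));
            }
        }
    }
}
```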
zhanghl/spring-boot-debezium: https://gitee.com/zhl001/spring-boot-debezium

Three files in that project are worth looking at: the pom and two classes.
pom.xml
```xml
<parent>
    <groupId>org.springframework.boot</groupId>
    <artifactId>spring-boot-starter-parent</artifactId>
    <version>2.2.13.RELEASE</version>
    <relativePath/> <!-- lookup parent from repository -->
</parent>
<groupId>cn.felord</groupId>
<artifactId>spring-boot-debezium</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>spring-boot-debezium</name>
<description>Demo project for Spring Boot</description>
<properties>
    <java.version>1.8</java.version>
    <debezium.version>1.5.2.Final</debezium.version>
</properties>
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-actuator</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>io.debezium</groupId>
        <artifactId>debezium-api</artifactId>
        <version>${debezium.version}</version>
    </dependency>
    <dependency>
        <groupId>io.debezium</groupId>
        <artifactId>debezium-embedded</artifactId>
        <version>${debezium.version}</version>
    </dependency>
    <dependency>
        <groupId>io.debezium</groupId>
        <artifactId>debezium-connector-mysql</artifactId>
        <version>${debezium.version}</version>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-test</artifactId>
        <scope>test</scope>
    </dependency>
    <!-- Spring Boot + MyBatis integration -->
    <dependency>
        <groupId>org.mybatis.spring.boot</groupId>
        <artifactId>mybatis-spring-boot-starter</artifactId>
        <version>1.3.0</version>
    </dependency>
    <!-- MySQL driver -->
    <dependency>
        <groupId>mysql</groupId>
        <artifactId>mysql-connector-java</artifactId>
    </dependency>
    <!-- Spring Boot + JDBC integration -->
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-jdbc</artifactId>
    </dependency>
    <!-- SQL Server data source -->
    <dependency>
        <groupId>com.microsoft.sqlserver</groupId>
        <artifactId>sqljdbc4</artifactId>
        <version>4.0</version>
    </dependency>
</dependencies>
```
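The Gitee project is an ordinary Spring Boot application, so besides the two classes shown next it only needs the usual entry point. A minimal sketch is shown below; the class name and package are illustrative rather than copied from the repository.

```java
package cn.felord.debezium;

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;

// Standard Spring Boot entry point. Starting the application also starts the embedded
// Debezium engine, because DebeziumServerBootstrap below implements SmartLifecycle.
@SpringBootApplication
public class SpringBootDebeziumApplication {

    public static void main(String[] args) {
        SpringApplication.run(SpringBootDebeziumApplication.class, args);
    }
}
```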
The two Java classes:

DebeziumConfiguration.java
```java
package cn.felord.debezium.debezium;

import io.debezium.connector.mysql.MySqlConnector;
import io.debezium.data.Envelope;
import io.debezium.embedded.Connect;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.RecordChangeEvent;
import io.debezium.engine.format.ChangeEventFormat;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.kafka.connect.data.Field;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.util.List;
import java.util.Map;

import static io.debezium.data.Envelope.FieldName.*;
import static java.util.stream.Collectors.toMap;

/**
 * The type Debezium configuration.
 *
 * @author n1
 * @since 2021/6/1 17:01
 */
@Configuration
public class DebeziumConfiguration {

    /**
     * Debezium configuration.
     *
     * @return configuration
     */
    @Bean
    io.debezium.config.Configuration debeziumConfig() {
        return io.debezium.config.Configuration.create()
                // Java class name of the connector
                .with("connector.class", MySqlConnector.class.getName())
                // offset persistence, used for fault tolerance (default value)
                .with("offset.storage", "org.apache.kafka.connect.storage.FileOffsetBackingStore")
                // offset persistence file path, default /tmp/offsets.dat; if the path is wrong the
                // offsets cannot be stored, which may lead to re-consuming changes.
                // When the connector restarts it uses the last recorded offset to know where in
                // the source it should resume reading.
                .with("offset.storage.file.filename", "/tmp/offsets.dat")
                // how often offsets are captured/flushed
                .with("offset.flush.interval.ms", 1)
                // unique name of the connector
                .with("name", "mysql-connector")
                // database hostname
                .with("database.hostname", "10.1.1.1")
                // port
                .with("database.port", 3306)
                // user
                .with("database.user", "canal")
                // password
                .with("database.password", "canal")
                // list of databases to include (your database)
                .with("database.include.list", "md_test")
                // whether to include schema-level changes; the default value true is recommended
                .with("include.schema.changes", false)
                // the server-id configured in my.cnf
                .with("database.server.id", 1)
                // logical name of the MySQL server or cluster
                .with("database.server.name", "customer-mysql-db-server")
                // database history implementation
                .with("database.history", "io.debezium.relational.history.FileDatabaseHistory")
                // where the history (DDL changes) is stored
                .with("database.history.file.filename", "/tmp/dbhistory.dat")
                .build();
    }

    /**
     * Debezium server bootstrap.
     *
     * @param configuration the configuration
     * @return the debezium server bootstrap
     */
    @Bean
    DebeziumServerBootstrap debeziumServerBootstrap(io.debezium.config.Configuration configuration) {
        DebeziumServerBootstrap debeziumServerBootstrap = new DebeziumServerBootstrap();
        DebeziumEngine<RecordChangeEvent<SourceRecord>> debeziumEngine = DebeziumEngine.create(ChangeEventFormat.of(Connect.class))
                .using(configuration.asProperties())
                .notifying(this::handlePayload)
                .build();
        debeziumServerBootstrap.setDebeziumEngine(debeziumEngine);
        return debeziumServerBootstrap;
    }

    private void handlePayload(List<RecordChangeEvent<SourceRecord>> recordChangeEvents,
                               DebeziumEngine.RecordCommitter<RecordChangeEvent<SourceRecord>> recordCommitter) {
        recordChangeEvents.forEach(r -> {
            SourceRecord sourceRecord = r.record();
            String topic = sourceRecord.topic();
            Struct sourceRecordChangeValue = (Struct) sourceRecord.value();
            if (sourceRecordChangeValue != null) {
                // check the operation type and skip reads; only handle insert/update/delete
                // (this could also be done in the connector configuration)
                Envelope.Operation operation = Envelope.Operation.forCode((String) sourceRecordChangeValue.get(OPERATION));
                if (operation != Envelope.Operation.READ) {
                    String record = operation == Envelope.Operation.DELETE ? BEFORE : AFTER;
                    // get the struct holding the data for the insert/update/delete
                    Struct struct = (Struct) sourceRecordChangeValue.get(record);
                    // wrap the changed row into a Map
                    Map<String, Object> payload = struct.schema().fields().stream()
                            .map(Field::name)
                            .filter(fieldName -> struct.get(fieldName) != null)
                            .map(fieldName -> Pair.of(fieldName, struct.get(fieldName)))
                            .collect(toMap(Pair::getKey, Pair::getValue));
                    // simply print the result here
                    System.out.println("operation = " + operation);
                    System.out.println("data = " + payload);
                    if (operation.toString().equals("CREATE")) {
                        System.out.println("one new record inserted");
                    }
                    // table name
                    if (topic.split("\\.").length > 2) {
                        String tableName = topic.split("\\.")[2];
                        System.out.println("tableName = " + tableName);
                    }
                }
            }
        });
    }
}
```
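One detail the demo handler above glosses over: the RecordCommitter that Debezium passes into the handler is never used, so the engine is not explicitly told which events have been handled. A possible refinement is sketched below as a drop-in variant of handlePayload (same imports and types as DebeziumConfiguration, per-record logic elided): it marks each record and then the whole batch as processed so offsets can be flushed reliably.

```java
// Sketch only: same signature accepted by notifying(this::handlePayload);
// the per-record handling from the demo is elided.
private void handlePayload(List<RecordChangeEvent<SourceRecord>> recordChangeEvents,
                           DebeziumEngine.RecordCommitter<RecordChangeEvent<SourceRecord>> recordCommitter)
        throws InterruptedException {
    for (RecordChangeEvent<SourceRecord> event : recordChangeEvents) {
        // ... operation check, payload extraction, printing, as in the demo ...
        // mark this single record as handled
        recordCommitter.markProcessed(event);
    }
    // mark the whole batch as finished so Debezium can flush offsets
    recordCommitter.markBatchFinished();
}
```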
DebeziumServerBootstrap.java

```java
package cn.felord.debezium.debezium;

import io.debezium.engine.DebeziumEngine;
import lombok.Data;
import lombok.SneakyThrows;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.context.SmartLifecycle;
import org.springframework.util.Assert;

import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

/**
 * @author n1
 * @since 2021/6/2 10:45
 */
@Data
public class DebeziumServerBootstrap implements InitializingBean, SmartLifecycle {

    private final Executor executor = Executors.newSingleThreadExecutor();
    private DebeziumEngine<?> debeziumEngine;

    @Override
    public void start() {
        executor.execute(debeziumEngine);
    }

    @SneakyThrows
    @Override
    public void stop() {
        debeziumEngine.close();
    }

    @Override
    public boolean isRunning() {
        return false;
    }

    @Override
    public void afterPropertiesSet() throws Exception {
        Assert.notNull(debeziumEngine, "debeziumEngine must not be null");
    }
}
```
Closing thoughts

Really powerful: ten times better than canal, and non-intrusive. For a comparison, see:
"不想引入MQ？不妨试试 Debezium" (CSDN blog)

Why Debezium?

With so many frameworks out there, why pick Debezium? The list looks long, but after ruling the alternatives out one by one you are left with Debezium and canal.

Tools like sqoop, kettle and datax are products of the pre-big-data era, roughly what Struts 2 is to the web world. They are also query-based rather than binlog-based, so strictly speaking they are not CDC at all. Ruled out first.

Flink CDC is a big-data framework; for the data volume of a typical web project it is overkill.

Databus and Maxwell are relatively niche and rarely used.

Finally, the reasons for not using canal:

- canal has to be installed as a separate service, which violates the principle of "do not multiply entities beyond necessity".
- canal can only do CDC against MySQL, which is a serious limitation.
- Flink CDC (led by an Alibaba team), which is very popular in the big-data world, is itself built on Debezium rather than on canal, even though canal is also an Alibaba product.
- Debezium can publish changes to a Kafka topic, so downstream consumers only read from Kafka, which effectively reduces read pressure on the database, and it can guarantee exactly-once or at-least-once semantics.
- It can also run in embedded mode, with no Kafka cluster to deploy by hand, again satisfying "do not multiply entities beyond necessity" (see the sketch at the end of this post).
- Moreover, canal only supports MySQL as the source, covering versions 5.1.x, 5.5.x, 5.6.x, 5.7.x and 8.0.x.
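On the Kafka point above: that mode relies on deploying the Debezium connector on Kafka Connect, but even in embedded mode you can receive each change as a JSON string and forward it to whatever queue you like. The sketch below is not part of the Gitee demo; the class name and the forwarding step are illustrative, and the configuration parameter is the same bean defined in DebeziumConfiguration.

```java
package cn.felord.debezium.debezium;

import io.debezium.engine.ChangeEvent;
import io.debezium.engine.DebeziumEngine;
import io.debezium.engine.format.Json;

// Illustrative alternative to the Connect-format engine: change events arrive as JSON
// strings, ready to be handed to a message queue instead of being printed.
public class JsonEngineFactory {

    public static DebeziumEngine<ChangeEvent<String, String>> create(io.debezium.config.Configuration configuration) {
        return DebeziumEngine.create(Json.class)
                .using(configuration.asProperties())
                .notifying(record -> {
                    String topic = record.destination(); // logical topic name
                    String json = record.value();        // row change serialized as JSON
                    // e.g. hand the JSON off to Kafka/RabbitMQ here; this sketch just prints it
                    System.out.println(topic + " -> " + json);
                })
                .build();
    }
}
```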