wsm12138 commented on issue #18829:
URL:
https://github.com/apache/shardingsphere/issues/18829#issuecomment-1173695258
# When storage_nodes is MySQL.MGR, there is a new error:
"io.netty.handler.codec.DecoderException:
java.lang.UnsupportedOperationException: Do not parse binlog event fully,
eventHeader"
# error log
```
[INFO ] 2022-07-04 18:53:08.779
[ShardingSphere-Scaling-Incremental-0130317c30317c3054317c7368617264696e675f6462-0]
o.a.s.d.p.a.e.AbstractLifecycleExecutor - start lifecycle executor:
org.apache.shardingsphere.data.pipeline.mysql.ingest.MySQLIncrementalDumper@9bdfba5
[INFO ] 2022-07-04 18:53:08.779
[ShardingSphere-Scaling-Incremental-0130317c30317c3054317c7368617264696e675f6462-0]
o.a.s.d.p.m.i.MySQLIncrementalDumper - incremental dump,
jdbcUrl=jdbc:mysql://117.48.121.18:33308/scaling_ds_0?jdbcCompliantTruncation=false&yearIsDateType=false&noDatetimeStringSync=true&serverTimezone=UTC&zeroDateTimeBehavior=convertToNull&rewriteBatchedStatements=true&useSSL=false
[ERROR] 2022-07-04 18:53:08.941 [nioEventLoopGroup-4-1]
o.a.s.d.p.m.i.client.MySQLClient - protocol resolution error
io.netty.handler.codec.DecoderException:
java.lang.UnsupportedOperationException: Do not parse binlog event fully,
eventHeader: MySQLBinlogEventHeader(timestamp=1656926378, eventType=15,
serverId=133308, eventSize=121, logPos=0, flags=0), remaining packet 9ee343ea
at
io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:480)
at
io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:279)
at
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
at
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
at
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
at
io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:327)
at
io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:299)
at
io.netty.handler.codec.ByteToMessageCodec.channelRead(ByteToMessageCodec.java:103)
at
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
at
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
at
io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:357)
at
io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1410)
at
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:379)
at
io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:365)
at
io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:919)
at
io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166)
at
io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:722)
at
io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:658)
at
io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:584)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:496)
at
io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:986)
at
io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
at
io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.UnsupportedOperationException: Do not parse binlog
event fully, eventHeader: MySQLBinlogEventHeader(timestamp=1656926378,
eventType=15, serverId=133308, eventSize=121, logPos=0, flags=0), remaining
packet 9ee343ea
at
org.apache.shardingsphere.data.pipeline.mysql.ingest.client.netty.MySQLBinlogEventPacketDecoder.decode(MySQLBinlogEventPacketDecoder.java:89)
at
io.netty.handler.codec.ByteToMessageDecoder.decodeRemovalReentryProtection(ByteToMessageDecoder.java:510)
at
io.netty.handler.codec.ByteToMessageDecoder.callDecode(ByteToMessageDecoder.java:449)
... 23 common frames omitted
[INFO ] 2022-07-04 18:53:08.942 [nioEventLoopGroup-4-1]
o.a.s.d.p.m.i.client.MySQLClient - reconnect mysql client.
[INFO ] 2022-07-04 18:53:20.148 [_finished_check_Worker-1]
o.a.s.d.p.core.job.FinishedCheckJob - completionDetector not configured, auto
switch will not be enabled. You could query job progress and switch config
manually with DistSQL.
[INFO ] 2022-07-04 18:53:30.144 [_finished_check_Worker-1]
o.a.s.d.p.core.job.FinishedCheckJob - completionDetector not configured, auto
switch will not be enabled. You could query job progress and switch config
manually with DistSQL.
[INFO ] 2022-07-04 18:53:40.046 [_finished_check_Worker-1]
o.a.s.d.p.core.job.FinishedCheckJob - completionDetector not configured, auto
switch will not be enabled. You could query job progress and switch config
manually with DistSQL.
```
# Steps to reproduce the behavior
#### server.yaml
```
mode:
type: Cluster
repository:
type: ZooKeeper
props:
namespace: governance_ds
server-lists: localhost:2181
retryIntervalMilliseconds: 500
timeToLiveSeconds: 60
maxRetries: 3
operationTimeoutMilliseconds: 500
overwrite: true
rules:
- !AUTHORITY
users:
- user: root@%
password: root
- user: sharding
password: sharding
privilege:
type: ALL_PERMITTED
```
#### config-sharding.yaml
```
databaseName: sharding_db
dataSources:
ds_0:
url: jdbc:mysql://ip:33308/scaling_ds_0?serverTimezone=UTC&useSSL=false
username: root
password: 123456
connectionTimeoutMilliseconds: 3000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
ds_1:
url: jdbc:mysql://:33307/scaling_ds_0?serverTimezone=UTC&useSSL=false
username: root
password: 123456
connectionTimeoutMilliseconds: 3000
idleTimeoutMilliseconds: 60000
maxLifetimeMilliseconds: 1800000
maxPoolSize: 50
minPoolSize: 1
rules:
- !SHARDING
autoTables:
t_order:
actualDataSources: readwrite
keyGenerateStrategy:
column: order_id
keyGeneratorName: t_order_snowflake
logicTable: t_order
shardingStrategy:
standard:
shardingAlgorithmName: t_order_hash_mod
shardingColumn: order_id
shardingAlgorithms:
t_order_inline:
props:
algorithm-expression: t_order_${order_id % 2}
type: INLINE
t_order_hash_mod:
props:
sharding-count: '2'
type: hash_mod
keyGenerators:
snowflake:
type: SNOWFLAKE
scalingName: default_scaling
scaling:
default_scaling:
input:
workerThread: 40
batchSize: 1000
output:
workerThread: 40
batchSize: 1000
streamChannel:
type: MEMORY
props:
block-queue-size: 10000
dataConsistencyChecker:
type: DATA_MATCH
props:
chunk-size: 1000
- !READWRITE_SPLITTING
dataSources:
readwrite:
type: Dynamic
props:
auto-aware-data-source-name: replica_ds
- !DB_DISCOVERY
dataSources:
replica_ds:
dataSourceNames:
- ds_0
- ds_1
discoveryHeartbeatName: mgr-heartbeat
discoveryTypeName: mgr
discoveryHeartbeats:
mgr-heartbeat:
props:
keep-alive-cron: '0/5 * * * * ?'
discoveryTypes:
mgr:
type: MySQL.MGR
props:
group-name: 558edd3c-02ec-11ea-9bb3-080027e39bd2
```
#### SQL executed on proxy
```
CREATE TABLE t_order (order_id bigint NOT NULL, user_id int DEFAULT NULL,
status varchar(50) DEFAULT NULL, t_numeric numeric(10,2) DEFAULT NULL, PRIMARY
KEY (order_id));
ALTER SHARDING TABLE RULE
t_order(RESOURCES(source_readwrite,target_readwrite),SHARDING_COLUMN=order_id,TYPE(NAME=hash_mod,PROPERTIES('sharding-count'=6)),KEY_GENERATE_STRATEGY(COLUMN=order_id,TYPE(NAME=snowflake)));
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]