liaojiexin opened a new issue, #25182:
URL: https://github.com/apache/shardingsphere/issues/25182
ShardingSphere-Proxy version: 5.3.2
Here's what I did:
```bash
# 172.28.0.2 is the address of the shardingsphere-proxy server; the trailing postgres is the database
psql -h 172.28.0.2 -p 3308 -U root postgres
# Create the sharding_db database
CREATE DATABASE sharding_db;
# Connect to the database
\c sharding_db
# Register the storage units
REGISTER STORAGE UNIT ds_0 (
URL="jdbc:postgresql://172.28.0.10:5432/postgres",
USER="postgres",
PASSWORD="201314",
PROPERTIES("minPoolSize"="1","maxPoolSize"="20","idleTimeout"="60000")
), ds_1 (
URL="jdbc:postgresql://172.28.0.20:5432/postgres",
USER="postgres",
PASSWORD="201314",
PROPERTIES("minPoolSize"="1","maxPoolSize"="20","idleTimeout"="60000")
);
# Configure the sharding rules (see the PREVIEW sketch after this block for checking the routing)
# Sharding rule for the ds_attach_node table
CREATE SHARDING TABLE RULE ds_attach_node (
DATANODES("ds_${0..1}.ds_attach_node_${0..1}"),
DATABASE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_$->{(bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2)}")))),
TABLE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_attach_node_$->{bimid.hashCode() & Integer.MAX_VALUE % 4 - (((bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2))) * 2}")))));
# Sharding rule for ds_attach_metadata
CREATE SHARDING TABLE RULE ds_attach_metadata (
DATANODES("ds_${0..1}.ds_attach_metadata_${0..1}"),
DATABASE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_$->{(bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2)}")))),
TABLE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_attach_metadata_$->{bimid.hashCode() & Integer.MAX_VALUE % 4 - (((bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2))) * 2}")))));
# Sharding rule for dl_attach_operate_log
CREATE SHARDING TABLE RULE dl_attach_operate_log (
DATANODES("ds_${0..1}.dl_attach_operate_log_${0..1}"),
DATABASE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_$->{(bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2)}")))),
TABLE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="dl_attach_operate_log_$->{bimid.hashCode() & Integer.MAX_VALUE % 4 - (((bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2))) * 2}")))));
# Sharding rule for ds_attach_renamelog
CREATE SHARDING TABLE RULE ds_attach_renamelog (
DATANODES("ds_${0..1}.ds_attach_renamelog_${0..1}"),
DATABASE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_$->{(bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2)}")))),
TABLE_STRATEGY(TYPE="standard",SHARDING_COLUMN=bimid,SHARDING_ALGORITHM(TYPE(NAME="inline",PROPERTIES("algorithm-expression"="ds_attach_renamelog_$->{bimid.hashCode() & Integer.MAX_VALUE % 4 - (((bimid.hashCode() & Integer.MAX_VALUE % 4).intdiv(2))) * 2}")))));
# Sharding rule for ds_attach_extend
CREATE SHARDING TABLE RULE ds_attach_extend (
STORAGE_UNITS(ds_0,ds_1),
SHARDING_COLUMN=id,TYPE(NAME="hash_mod",PROPERTIES("sharding-count"="2"))
);
# Sharding rule for ds_attach_segment
CREATE SHARDING TABLE RULE ds_attach_segment (
STORAGE_UNITS(ds_0,ds_1),
SHARDING_COLUMN=md5,TYPE(NAME="hash_mod",PROPERTIES("sharding-count"="2"))
);
# Register the migration source storage unit
REGISTER MIGRATION SOURCE STORAGE UNIT ds (
URL="jdbc:postgresql://172.28.0.3:5432/postgres",
USER="postgres",
PASSWORD="201314",
PROPERTIES("minPoolSize"="1","maxPoolSize"="20","idleTimeout"="60000")
);
# Start the data migration
MIGRATE TABLE ds.ds_attach_node into sharding_db.ds_attach_node;
MIGRATE TABLE ds.ds_attach_metadata into sharding_db.ds_attach_metadata;
MIGRATE TABLE ds.dl_attach_operate_log into
sharding_db.dl_attach_operate_log;
MIGRATE TABLE ds.ds_attach_renamelog into sharding_db.ds_attach_renamelog;
MIGRATE TABLE ds.ds_attach_extend into sharding_db.ds_attach_extend;
MIGRATE TABLE ds.ds_attach_segment into sharding_db.ds_attach_segment;
# List the migration jobs
SHOW MIGRATION LIST;
                  id                   |             tables              | job_item_count | active |     create_time     | stop_time
---------------------------------------+---------------------------------+----------------+--------+---------------------+-----------
 j01012201fb9d7a83d2ebb66cff4296804f98 | ds.public.ds_attach_extend      | 1              | true   | 2023-04-16 13:04:35 |
 j01012c3dbb074ebd2bf8befd6b248b6ebe25 | ds.public.ds_attach_metadata    | 1              | true   | 2023-04-16 13:04:35 |
 j010154a3756a17017fa3f1f9da8f2558457a | ds.public.ds_attach_renamelog   | 1              | true   | 2023-04-16 13:04:35 |
 j0101a5a65a89e9b474a4d206bfcdc24cc299 | ds.public.dl_attach_operate_log | 1              | true   | 2023-04-16 13:04:35 |
 j0101cc68f73c67b7c2e23d3c2125da32fb05 | ds.public.ds_attach_segment     | 1              | true   | 2023-04-16 13:04:35 |
 j0101d0166123c13a1feb02f2c0a366d48da8 | ds.public.ds_attach_node        | 1              | true   | 2023-04-16 13:04:35 |
(6 rows)
```
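For reference, here is how I would expect the routing of these rules to be checkable from the same psql session. This is just a sketch: the `bimid` literal is a made-up sample value, and both statements are standard DistSQL.

```bash
# Sketch: show which storage unit and physical table a sample bimid routes to
# ('sample-bimid' is a hypothetical value; substitute a real one)
PREVIEW SELECT * FROM ds_attach_node WHERE bimid = 'sample-bimid';
# Sketch: list the actual data nodes behind every sharding rule, including the
# auto tables created for ds_attach_extend and ds_attach_segment
SHOW SHARDING TABLE NODES;
```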
But I ran into two problems.
The first one is with **ds_attach_metadata**:
```bash
sharding_db=> SHOW MIGRATION STATUS 'j01012c3dbb074ebd2bf8befd6b248b6ebe25';
 item | data_source |            tables            |             status             | active | processed_records_count | inventory_finished_percentage | incremental_idle_seconds | error_message
------+-------------+------------------------------+--------------------------------+--------+-------------------------+-------------------------------+--------------------------+---------------
 0    | ds          | ds.public.ds_attach_metadata | EXECUTE_INVENTORY_TASK_FAILURE | false  | 0                       | 0                             |                          | org.apache.shardingsphere.data.pipeline.core.exception.job.PipelineImporterJobWriteException: Importer job write data failed.
          at org.apache.shardingsphere.infra.util.exception.ShardingSpherePreconditions.checkState(ShardingSpherePreconditions.java:41)
          at org.apache.shardingsphere.data.pipeline.core.importer.DataSourceImporter.flushInternal(DataSourceImporter.java:134)
          at org.apache.shardingsphere.data.pipeline.core.importer.DataSourceImporter.flush(DataSourceImporter.java:123)
          at org.apache.shardingsphere.data.pipeline.core.importer.DataSourceImporter.runBlocking(DataSourceImporter.java:99)
          at org.apache.shardingsphere.data.pipeline.api.executor.AbstractLifecycleExecutor.start(AbstractLifecycleExecutor.java:52)
          at org.apache.shardingsphere.data.pipeline.api.executor.AbstractLifecycleExecutor.run(AbstractLifecycleExecutor.java:90)
          at java.base/java.util.concurrent.CompletableFuture$AsyncRun.run(CompletableFuture.java:1804)
          at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
          at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
          at java.base/java.lang.Thread.run(Thread.java:833)
(1 row)
```
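The error_message column only shows the wrapping exception, not the root cause; the full stack trace should be in the proxy log. A sketch for pulling it out, assuming a default binary install where the start script writes to logs/stdout.log (the path is an assumption; adjust to your installation):

```bash
# Sketch: grab the full stack trace, including any "Caused by" section,
# from the proxy log (log path is an assumption for a default binary install)
grep -A 40 'PipelineImporterJobWriteException' /path/to/shardingsphere-proxy/logs/stdout.log
```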
The second is that **ds_attach_segment** and **ds_attach_extend** seem to migrate successfully, but I can't see the data in ds_0 and ds_1:
```bash
sharding_db=> SHOW MIGRATION CHECK STATUS 'j0101cc68f73c67b7c2e23d3c2125da32fb05';
            tables            | result | check_failed_tables | finished_percentage | remaining_seconds |    check_begin_time     |     check_end_time      | duration_seconds | error_message
------------------------------+--------+---------------------+---------------------+-------------------+-------------------------+-------------------------+------------------+---------------
 ds.public.ds_attach_segment  | true   |                     | 100                 | 0                 | 2023-04-16 14:02:35.413 | 2023-04-16 14:02:39.803 | 4                |
(1 row)
```
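On the missing-data question, note that with these rules the rows should live in the suffixed physical tables (ds_attach_segment_0 / ds_attach_segment_1), not in a table literally named ds_attach_segment. A sketch of checking the storage units directly, using the addresses registered above (the exact table-to-unit layout can be confirmed with SHOW SHARDING TABLE NODES):

```bash
# Sketch: look for the physical shard tables on each storage unit directly
psql -h 172.28.0.10 -p 5432 -U postgres postgres -c '\dt ds_attach_segment*'
psql -h 172.28.0.20 -p 5432 -U postgres postgres -c '\dt ds_attach_segment*'
```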
Apart from these, the other tables are normal.
How should I deal with these two issues?