This is an automated email from the ASF dual-hosted git repository.

liyang pushed a commit to branch kylin5
in repository https://gitbox.apache.org/repos/asf/kylin.git

commit 1c8fb3d9b5fa4a37838435433b429ee99d00c9e7
Author: Yinghao Lin <[email protected]>
AuthorDate: Fri Sep 13 19:28:39 2024 +0800

    KYLIN-5970 Support kylin5 standalone docker
---
 build/bin/sample.sh                                |  19 +-
 build/sample_project/template/UUID                 | Bin 38 -> 0 bytes
 .../project => core_meta/PROJECT}/learn_kylin.json |  11 +-
 .../TABLE_INFO/learn_kylin.SSB.CUSTOMER.json}      |  16 +-
 .../TABLE_INFO/learn_kylin.SSB.DATES.json}         |  16 +-
 .../TABLE_INFO/learn_kylin.SSB.LINEORDER.json}     |  25 +-
 .../TABLE_INFO/learn_kylin.SSB.PART.json}          |  16 +-
 .../TABLE_INFO/learn_kylin.SSB.P_LINEORDER.json}   |  16 +-
 .../TABLE_INFO/learn_kylin.SSB.SUPPLIER.json}      |  16 +-
 .../2d07e878-da28-a203-2d2c-185b4c6656f1.json      |  12 -
 .../2d07e878-da28-a203-2d2c-185b4c6656f1.json      | 111 ------
 .../2d07e878-da28-a203-2d2c-185b4c6656f1.json      | 408 --------------------
 .../release-manager/standalone-docker/.gitignore   |   2 +-
 .../standalone-docker/all-in-one/Dockerfile        |  70 ++++
 .../{all_in_one => all-in-one}/README.md           |   0
 .../standalone-docker/all-in-one/build.sh          |  35 ++
 .../all-in-one/conf/hadoop/capacity-scheduler.xml  |  77 ++++
 .../conf/hadoop/core-site.xml                      |   5 +-
 .../all-in-one/conf/hadoop/hadoop-env.sh           | 423 +++++++++++++++++++++
 .../conf/hadoop/hdfs-site.xml                      |   8 -
 .../conf/hadoop/mapred-site.xml}                   |   9 +-
 .../conf/hadoop}/yarn-site.xml                     |  54 ++-
 .../all-in-one/conf/hive/hive-env.sh               |  54 +++
 .../all-in-one/conf/hive/hive-site.xml             |  67 ++++
 .../conf/kylin/kylin.properties.override           |  60 +--
 .../standalone-docker/all-in-one/conf/ssh/ssh.conf |   2 +
 .../all-in-one/conf/ssh/sshd.conf                  |   1 +
 .../all-in-one/conf/zookeeper/zoo.cfg              |  36 ++
 .../dev-docker/Dockerfile_kylin_dev                |   0
 .../dev-docker/build_and_run.sh                    |   0
 .../dev-docker/entrypoint-dev.sh                   |   0
 .../standalone-docker/all-in-one/run.sh            |  16 +
 .../all-in-one/scripts/entrypoint.sh               | 139 +++++++
 .../scripts/install-mysql.sh}                      |   2 +-
 .../all-in-one/scripts/start-historyserver.sh      |   3 +
 .../all-in-one/scripts/start-hivemetastore.sh      |   9 +
 .../all-in-one/scripts/start-hiveserver2.sh        |   9 +
 .../standalone-docker/all_in_one/Dockerfile_hadoop |  73 ----
 .../standalone-docker/all_in_one/Dockerfile_kylin  |  48 ---
 .../standalone-docker/all_in_one/build_and_run.sh  |  81 ----
 .../conf/hadoop-dev/capacity-scheduler.xml         | 134 -------
 .../all_in_one/conf/hadoop-dev/hdfs-site.xml       |  36 --
 .../all_in_one/conf/hadoop-dev/mapred-site.xml     |  20 -
 .../all_in_one/conf/hadoop/capacity-scheduler.xml  | 134 -------
 .../all_in_one/conf/hadoop/mapred-site.xml         |  20 -
 .../all_in_one/conf/hadoop/yarn-site.xml           |  56 ---
 .../all_in_one/conf/hive/hive-site.xml             |  44 ---
 .../conf/kylin-dev/kylin.properties.override       |  75 ----
 .../standalone-docker/all_in_one/conf/mysql/my.cnf |   4 -
 .../standalone-docker/all_in_one/conf/zk/zoo.cfg   |  45 ---
 .../all_in_one/scripts/entrypoint.sh               | 121 ------
 51 files changed, 1095 insertions(+), 1543 deletions(-)

diff --git a/build/bin/sample.sh b/build/bin/sample.sh
index 30ea871088..78cba57b97 100644
--- a/build/bin/sample.sh
+++ b/build/bin/sample.sh
@@ -108,21 +108,18 @@ fi
 echo "Sample hive tables are created successfully; Going to create sample 
project..."
 
 mkdir -p ${KYLIN_HOME}/sample_project/sample_model/metadata
-cp -rf ${KYLIN_HOME}/sample_project/template/* 
${KYLIN_HOME}/sample_project/sample_model/metadata
+cp -R ${KYLIN_HOME}/sample_project/template/core_meta 
${KYLIN_HOME}/sample_project/sample_model/metadata
 
 #### Add version info into model
 kylin_version=5.0.0.0
 echo "kylin version is "$kylin_version
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/_global/project/learn_kylin.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/dataflow/2d07e878-da28-a203-2d2c-185b4c6656f1.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/index_plan/2d07e878-da28-a203-2d2c-185b4c6656f1.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/model_desc/2d07e878-da28-a203-2d2c-185b4c6656f1.json
-
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/table/SSB.CUSTOMER.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/table/SSB.DATES.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/table/SSB.P_LINEORDER.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/table/SSB.PART.json
-sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/learn_kylin/table/SSB.SUPPLIER.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/PROJECT/learn_kylin.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/TABLE_INFO/learn_kylin.SSB.CUSTOMER.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/TABLE_INFO/learn_kylin.SSB.DATES.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/TABLE_INFO/learn_kylin.SSB.LINEORDER.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/TABLE_INFO/learn_kylin.SSB.P_LINEORDER.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/TABLE_INFO/learn_kylin.SSB.PART.json
+sed -i "s/%default_version%/${kylin_version}/g" 
${KYLIN_HOME}/sample_project/sample_model/metadata/core_meta/TABLE_INFO/learn_kylin.SSB.SUPPLIER.json
 
 function printImportResult() {
   error=$1
diff --git a/build/sample_project/template/UUID 
b/build/sample_project/template/UUID
deleted file mode 100644
index 9713b74ecb..0000000000
Binary files a/build/sample_project/template/UUID and /dev/null differ
diff --git a/build/sample_project/template/_global/project/learn_kylin.json 
b/build/sample_project/template/core_meta/PROJECT/learn_kylin.json
similarity index 81%
rename from build/sample_project/template/_global/project/learn_kylin.json
rename to build/sample_project/template/core_meta/PROJECT/learn_kylin.json
index 6b68c5e60c..1104e253dd 100644
--- a/build/sample_project/template/_global/project/learn_kylin.json
+++ b/build/sample_project/template/core_meta/PROJECT/learn_kylin.json
@@ -1,12 +1,12 @@
 {
-  "uuid" : "cc087b95-78b9-f3f6-ee2d-8b4bdc4fbb2b",
-  "last_modified" : 1632293787433,
-  "create_time" : 1632293787433,
+  "uuid" : "6e8e4b33-5873-1a63-799c-3356793620d3",
+  "last_modified" : 1726220537212,
+  "create_time" : 1726220531678,
   "version" : "%default_version%",
   "name" : "learn_kylin",
   "owner" : "ADMIN",
   "status" : "ENABLED",
-  "create_time_utc" : 1632293787433,
+  "create_time_utc" : 1726220531681,
   "default_database" : "DEFAULT",
   "description" : "",
   "principal" : null,
@@ -15,6 +15,7 @@
   "override_kylin_properties" : {
     "kylin.metadata.semi-automatic-mode" : "false",
     "kylin.query.metadata.expose-computed-column" : "true",
+    "kylin.internal-table-enabled" : "true",
     "kylin.source.default" : "9"
   },
   "segment_config" : {
@@ -32,4 +33,4 @@
     },
     "create_empty_segment_enabled" : false
   }
-}
\ No newline at end of file
+}
diff --git a/build/sample_project/template/learn_kylin/table/SSB.CUSTOMER.json 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.CUSTOMER.json
similarity index 81%
rename from build/sample_project/template/learn_kylin/table/SSB.CUSTOMER.json
rename to 
build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.CUSTOMER.json
index 2522c05dd1..6687e071fb 100644
--- a/build/sample_project/template/learn_kylin/table/SSB.CUSTOMER.json
+++ 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.CUSTOMER.json
@@ -1,8 +1,10 @@
 {
-  "uuid" : "6ce4718b-ef9d-18ae-a1bf-5df2578eaa03",
+  "uuid" : "ccc282c6-50a7-0ddf-8e44-d929f69bc3bf",
   "last_modified" : 0,
-  "create_time" : 1632293818428,
+  "create_time" : 1726220561417,
   "version" : "%default_version%",
+  "project" : "learn_kylin",
+  "database" : "SSB",
   "name" : "CUSTOMER",
   "columns" : [ {
     "id" : "1",
@@ -47,6 +49,7 @@
   } ],
   "source_type" : 9,
   "table_type" : "EXTERNAL",
+  "has_Internal" : false,
   "top" : false,
   "increment_loading" : false,
   "last_snapshot_path" : null,
@@ -55,9 +58,14 @@
   "query_hit_count" : 0,
   "partition_column" : null,
   "snapshot_partitions" : { },
+  "snapshot_partitions_info" : { },
+  "snapshot_total_rows" : 0,
   "snapshot_partition_col" : null,
   "selected_snapshot_partition_col" : null,
   "temp_snapshot_path" : null,
   "snapshot_has_broken" : false,
-  "database" : "SSB"
-}
\ No newline at end of file
+  "transactional" : false,
+  "rangePartition" : false,
+  "partition_desc" : null,
+  "table_comment" : null
+}
diff --git a/build/sample_project/template/learn_kylin/table/SSB.DATES.json 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.DATES.json
similarity index 88%
rename from build/sample_project/template/learn_kylin/table/SSB.DATES.json
rename to 
build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.DATES.json
index dcd9335a27..ad738e8831 100644
--- a/build/sample_project/template/learn_kylin/table/SSB.DATES.json
+++ 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.DATES.json
@@ -1,8 +1,10 @@
 {
-  "uuid" : "f4385049-4983-b222-060e-a488297b58f7",
+  "uuid" : "f09153f1-1a07-e27c-611c-7662d8f5af26",
   "last_modified" : 0,
-  "create_time" : 1632293817278,
+  "create_time" : 1726220561245,
   "version" : "%default_version%",
+  "project" : "learn_kylin",
+  "database" : "SSB",
   "name" : "DATES",
   "columns" : [ {
     "id" : "1",
@@ -92,6 +94,7 @@
   } ],
   "source_type" : 9,
   "table_type" : "EXTERNAL",
+  "has_Internal" : false,
   "top" : false,
   "increment_loading" : false,
   "last_snapshot_path" : null,
@@ -100,9 +103,14 @@
   "query_hit_count" : 0,
   "partition_column" : null,
   "snapshot_partitions" : { },
+  "snapshot_partitions_info" : { },
+  "snapshot_total_rows" : 0,
   "snapshot_partition_col" : null,
   "selected_snapshot_partition_col" : null,
   "temp_snapshot_path" : null,
   "snapshot_has_broken" : false,
-  "database" : "SSB"
-}
\ No newline at end of file
+  "transactional" : false,
+  "rangePartition" : false,
+  "partition_desc" : null,
+  "table_comment" : null
+}
diff --git 
a/build/sample_project/template/learn_kylin/table/SSB.P_LINEORDER.json 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.LINEORDER.json
similarity index 86%
copy from build/sample_project/template/learn_kylin/table/SSB.P_LINEORDER.json
copy to 
build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.LINEORDER.json
index 9320e26b85..6b26d5b36c 100644
--- a/build/sample_project/template/learn_kylin/table/SSB.P_LINEORDER.json
+++ 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.LINEORDER.json
@@ -1,9 +1,11 @@
 {
-  "uuid" : "f077883d-5700-9305-988c-a1f0438f2c83",
+  "uuid" : "229b72d3-b959-d8ef-18b5-7859627ea3cd",
   "last_modified" : 0,
-  "create_time" : 1632293818812,
+  "create_time" : 1726220561529,
   "version" : "%default_version%",
-  "name" : "P_LINEORDER",
+  "project" : "learn_kylin",
+  "database" : "SSB",
+  "name" : "LINEORDER",
   "columns" : [ {
     "id" : "1",
     "name" : "LO_ORDERKEY",
@@ -89,14 +91,10 @@
     "name" : "LO_SHIPMODE",
     "datatype" : "varchar(4096)",
     "case_sensitive_name" : "lo_shipmode"
-  }, {
-    "id" : "18",
-    "name" : "V_REVENUE",
-    "datatype" : "bigint",
-    "case_sensitive_name" : "v_revenue"
   } ],
   "source_type" : 9,
-  "table_type" : "VIEW",
+  "table_type" : "EXTERNAL",
+  "has_Internal" : false,
   "top" : false,
   "increment_loading" : false,
   "last_snapshot_path" : null,
@@ -105,9 +103,14 @@
   "query_hit_count" : 0,
   "partition_column" : null,
   "snapshot_partitions" : { },
+  "snapshot_partitions_info" : { },
+  "snapshot_total_rows" : 0,
   "snapshot_partition_col" : null,
   "selected_snapshot_partition_col" : null,
   "temp_snapshot_path" : null,
   "snapshot_has_broken" : false,
-  "database" : "SSB"
-}
\ No newline at end of file
+  "transactional" : false,
+  "rangePartition" : false,
+  "partition_desc" : null,
+  "table_comment" : null
+}
diff --git a/build/sample_project/template/learn_kylin/table/SSB.PART.json 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.PART.json
similarity index 82%
rename from build/sample_project/template/learn_kylin/table/SSB.PART.json
rename to 
build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.PART.json
index e628057eb5..df1c39370b 100644
--- a/build/sample_project/template/learn_kylin/table/SSB.PART.json
+++ 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.PART.json
@@ -1,8 +1,10 @@
 {
-  "uuid" : "99f57020-140a-95df-5b92-d41459f3a99b",
+  "uuid" : "634c9560-bf98-9c40-5568-a4d4605e15ae",
   "last_modified" : 0,
-  "create_time" : 1632293817614,
+  "create_time" : 1726220561317,
   "version" : "%default_version%",
+  "project" : "learn_kylin",
+  "database" : "SSB",
   "name" : "PART",
   "columns" : [ {
     "id" : "1",
@@ -52,6 +54,7 @@
   } ],
   "source_type" : 9,
   "table_type" : "EXTERNAL",
+  "has_Internal" : false,
   "top" : false,
   "increment_loading" : false,
   "last_snapshot_path" : null,
@@ -60,9 +63,14 @@
   "query_hit_count" : 0,
   "partition_column" : null,
   "snapshot_partitions" : { },
+  "snapshot_partitions_info" : { },
+  "snapshot_total_rows" : 0,
   "snapshot_partition_col" : null,
   "selected_snapshot_partition_col" : null,
   "temp_snapshot_path" : null,
   "snapshot_has_broken" : false,
-  "database" : "SSB"
-}
\ No newline at end of file
+  "transactional" : false,
+  "rangePartition" : false,
+  "partition_desc" : null,
+  "table_comment" : null
+}
diff --git 
a/build/sample_project/template/learn_kylin/table/SSB.P_LINEORDER.json 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.P_LINEORDER.json
similarity index 89%
rename from build/sample_project/template/learn_kylin/table/SSB.P_LINEORDER.json
rename to 
build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.P_LINEORDER.json
index 9320e26b85..5ab2a78f8a 100644
--- a/build/sample_project/template/learn_kylin/table/SSB.P_LINEORDER.json
+++ 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.P_LINEORDER.json
@@ -1,8 +1,10 @@
 {
-  "uuid" : "f077883d-5700-9305-988c-a1f0438f2c83",
+  "uuid" : "221f1778-b5c6-6d1e-0f21-a6a30b0c7724",
   "last_modified" : 0,
-  "create_time" : 1632293818812,
+  "create_time" : 1726220562007,
   "version" : "%default_version%",
+  "project" : "learn_kylin",
+  "database" : "SSB",
   "name" : "P_LINEORDER",
   "columns" : [ {
     "id" : "1",
@@ -97,6 +99,7 @@
   } ],
   "source_type" : 9,
   "table_type" : "VIEW",
+  "has_Internal" : false,
   "top" : false,
   "increment_loading" : false,
   "last_snapshot_path" : null,
@@ -105,9 +108,14 @@
   "query_hit_count" : 0,
   "partition_column" : null,
   "snapshot_partitions" : { },
+  "snapshot_partitions_info" : { },
+  "snapshot_total_rows" : 0,
   "snapshot_partition_col" : null,
   "selected_snapshot_partition_col" : null,
   "temp_snapshot_path" : null,
   "snapshot_has_broken" : false,
-  "database" : "SSB"
-}
\ No newline at end of file
+  "transactional" : false,
+  "rangePartition" : false,
+  "partition_desc" : null,
+  "table_comment" : null
+}
diff --git a/build/sample_project/template/learn_kylin/table/SSB.SUPPLIER.json 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.SUPPLIER.json
similarity index 80%
rename from build/sample_project/template/learn_kylin/table/SSB.SUPPLIER.json
rename to 
build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.SUPPLIER.json
index 0d28b44c3f..11cb989826 100644
--- a/build/sample_project/template/learn_kylin/table/SSB.SUPPLIER.json
+++ 
b/build/sample_project/template/core_meta/TABLE_INFO/learn_kylin.SSB.SUPPLIER.json
@@ -1,8 +1,10 @@
 {
-  "uuid" : "068c2ea7-ac67-b091-525d-334da4a7882d",
+  "uuid" : "33559e63-2e7e-a0cd-d309-1215b931b0be",
   "last_modified" : 0,
-  "create_time" : 1632293817966,
+  "create_time" : 1726220561224,
   "version" : "%default_version%",
+  "project" : "learn_kylin",
+  "database" : "SSB",
   "name" : "SUPPLIER",
   "columns" : [ {
     "id" : "1",
@@ -42,6 +44,7 @@
   } ],
   "source_type" : 9,
   "table_type" : "EXTERNAL",
+  "has_Internal" : false,
   "top" : false,
   "increment_loading" : false,
   "last_snapshot_path" : null,
@@ -50,9 +53,14 @@
   "query_hit_count" : 0,
   "partition_column" : null,
   "snapshot_partitions" : { },
+  "snapshot_partitions_info" : { },
+  "snapshot_total_rows" : 0,
   "snapshot_partition_col" : null,
   "selected_snapshot_partition_col" : null,
   "temp_snapshot_path" : null,
   "snapshot_has_broken" : false,
-  "database" : "SSB"
-}
\ No newline at end of file
+  "transactional" : false,
+  "rangePartition" : false,
+  "partition_desc" : null,
+  "table_comment" : null
+}
diff --git 
a/build/sample_project/template/learn_kylin/dataflow/2d07e878-da28-a203-2d2c-185b4c6656f1.json
 
b/build/sample_project/template/learn_kylin/dataflow/2d07e878-da28-a203-2d2c-185b4c6656f1.json
deleted file mode 100644
index 3d9bc1ae26..0000000000
--- 
a/build/sample_project/template/learn_kylin/dataflow/2d07e878-da28-a203-2d2c-185b4c6656f1.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "uuid" : "2d07e878-da28-a203-2d2c-185b4c6656f1",
-  "last_modified" : 0,
-  "create_time" : 1632293938767,
-  "version" : "%default_version%",
-  "status" : "OFFLINE",
-  "cost" : 50,
-  "query_hit_count" : 0,
-  "last_query_time" : 0,
-  "layout_query_hit_count" : { },
-  "segments" : [ ]
-}
\ No newline at end of file
diff --git 
a/build/sample_project/template/learn_kylin/index_plan/2d07e878-da28-a203-2d2c-185b4c6656f1.json
 
b/build/sample_project/template/learn_kylin/index_plan/2d07e878-da28-a203-2d2c-185b4c6656f1.json
deleted file mode 100644
index 1e6cc57302..0000000000
--- 
a/build/sample_project/template/learn_kylin/index_plan/2d07e878-da28-a203-2d2c-185b4c6656f1.json
+++ /dev/null
@@ -1,111 +0,0 @@
-{
-  "uuid" : "2d07e878-da28-a203-2d2c-185b4c6656f1",
-  "last_modified" : 1632293938767,
-  "create_time" : 1632293938765,
-  "version" : "%default_version%",
-  "description" : null,
-  "rule_based_index" : {
-    "dimensions" : [ 53, 55, 46, 43, 40, 42, 32 ],
-    "measures" : [ 100000 ],
-    "global_dim_cap" : null,
-    "aggregation_groups" : [ {
-      "includes" : [ 53, 55 ],
-      "measures" : [ 100000 ],
-      "select_rule" : {
-        "hierarchy_dims" : [ ],
-        "mandatory_dims" : [ 53 ],
-        "joint_dims" : [ ]
-      },
-      "index_range" : "EMPTY"
-    }, {
-      "includes" : [ 46, 43 ],
-      "measures" : [ 100000 ],
-      "select_rule" : {
-        "hierarchy_dims" : [ ],
-        "mandatory_dims" : [ 46 ],
-        "joint_dims" : [ ]
-      },
-      "index_range" : "EMPTY"
-    }, {
-      "includes" : [ 40, 42 ],
-      "measures" : [ 100000 ],
-      "select_rule" : {
-        "hierarchy_dims" : [ ],
-        "mandatory_dims" : [ 40 ],
-        "joint_dims" : [ ]
-      },
-      "index_range" : "EMPTY"
-    }, {
-      "includes" : [ 32 ],
-      "measures" : [ 100000 ],
-      "select_rule" : {
-        "hierarchy_dims" : [ ],
-        "mandatory_dims" : [ ],
-        "joint_dims" : [ ]
-      },
-      "index_range" : "EMPTY"
-    } ],
-    "layout_id_mapping" : [ 10001, 20001, 30001, 40001, 50001, 60001, 70001, 
80001 ],
-    "parent_forward" : 3,
-    "index_start_id" : 10000,
-    "last_modify_time" : 1632294041099,
-    "layout_black_list" : [ ],
-    "scheduler_version" : 2,
-    "index_update_enabled" : true
-  },
-  "indexes" : [ {
-    "id" : 0,
-    "dimensions" : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, 32, 34, 40, 42, 43, 46, 53, 55 ],
-    "measures" : [ 100000 ],
-    "layouts" : [ {
-      "id" : 1,
-      "name" : null,
-      "owner" : null,
-      "col_order" : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 
16, 17, 32, 34, 40, 42, 43, 46, 53, 55, 100000 ],
-      "shard_by_columns" : [ ],
-      "partition_by_columns" : [ ],
-      "sort_by_columns" : [ ],
-      "storage_type" : 20,
-      "update_time" : 1632293938765,
-      "manual" : false,
-      "auto" : false,
-      "base" : true,
-      "draft_version" : null,
-      "index_range" : null
-    } ],
-    "next_layout_offset" : 2
-  }, {
-    "id" : 20000000000,
-    "dimensions" : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 
17, 32, 34, 40, 42, 43, 46, 53, 55 ],
-    "measures" : [ ],
-    "layouts" : [ {
-      "id" : 20000000001,
-      "name" : null,
-      "owner" : null,
-      "col_order" : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 
16, 17, 32, 34, 40, 42, 43, 46, 53, 55 ],
-      "shard_by_columns" : [ ],
-      "partition_by_columns" : [ ],
-      "sort_by_columns" : [ ],
-      "storage_type" : 20,
-      "update_time" : 1632293938765,
-      "manual" : false,
-      "auto" : false,
-      "base" : true,
-      "draft_version" : null,
-      "index_range" : null
-    } ],
-    "next_layout_offset" : 2
-  } ],
-  "override_properties" : { },
-  "to_be_deleted_indexes" : [ ],
-  "auto_merge_time_ranges" : null,
-  "retention_range" : 0,
-  "engine_type" : 80,
-  "next_aggregation_index_id" : 90000,
-  "next_table_index_id" : 20000010000,
-  "agg_shard_by_columns" : [ ],
-  "extend_partition_columns" : [ ],
-  "layout_bucket_num" : { },
-  "approved_additional_recs" : 0,
-  "approved_removal_recs" : 0
-}
\ No newline at end of file
diff --git 
a/build/sample_project/template/learn_kylin/model_desc/2d07e878-da28-a203-2d2c-185b4c6656f1.json
 
b/build/sample_project/template/learn_kylin/model_desc/2d07e878-da28-a203-2d2c-185b4c6656f1.json
deleted file mode 100644
index cdfb569eed..0000000000
--- 
a/build/sample_project/template/learn_kylin/model_desc/2d07e878-da28-a203-2d2c-185b4c6656f1.json
+++ /dev/null
@@ -1,408 +0,0 @@
-{
-  "uuid" : "2d07e878-da28-a203-2d2c-185b4c6656f1",
-  "last_modified" : 1632293938765,
-  "create_time" : 1632293935226,
-  "version" :  "%default_version%",
-  "alias" : "sample_ssb",
-  "owner" : "ADMIN",
-  "config_last_modifier" : null,
-  "config_last_modified" : 0,
-  "description" : "",
-  "fact_table" : "SSB.P_LINEORDER",
-  "fact_table_alias" : null,
-  "management_type" : "MODEL_BASED",
-  "join_tables" : [ {
-    "table" : "SSB.DATES",
-    "kind" : "LOOKUP",
-    "alias" : "DATES",
-    "join" : {
-      "type" : "INNER",
-      "primary_key" : [ "DATES.D_DATEKEY" ],
-      "foreign_key" : [ "P_LINEORDER.LO_ORDERDATE" ],
-      "non_equi_join_condition" : null,
-      "primary_table" : null,
-      "foreign_table" : null
-    },
-    "flattenable" : "flatten",
-    "join_relation_type" : "MANY_TO_ONE"
-  }, {
-    "table" : "SSB.CUSTOMER",
-    "kind" : "LOOKUP",
-    "alias" : "CUSTOMER",
-    "join" : {
-      "type" : "INNER",
-      "primary_key" : [ "CUSTOMER.C_CUSTKEY" ],
-      "foreign_key" : [ "P_LINEORDER.LO_CUSTKEY" ],
-      "non_equi_join_condition" : null,
-      "primary_table" : null,
-      "foreign_table" : null
-    },
-    "flattenable" : "flatten",
-    "join_relation_type" : "MANY_TO_ONE"
-  }, {
-    "table" : "SSB.PART",
-    "kind" : "LOOKUP",
-    "alias" : "PART",
-    "join" : {
-      "type" : "INNER",
-      "primary_key" : [ "PART.P_PARTKEY" ],
-      "foreign_key" : [ "P_LINEORDER.LO_PARTKEY" ],
-      "non_equi_join_condition" : null,
-      "primary_table" : null,
-      "foreign_table" : null
-    },
-    "flattenable" : "flatten",
-    "join_relation_type" : "MANY_TO_ONE"
-  }, {
-    "table" : "SSB.SUPPLIER",
-    "kind" : "LOOKUP",
-    "alias" : "SUPPLIER",
-    "join" : {
-      "type" : "INNER",
-      "primary_key" : [ "SUPPLIER.S_SUPPKEY" ],
-      "foreign_key" : [ "P_LINEORDER.LO_SUPPKEY" ],
-      "non_equi_join_condition" : null,
-      "primary_table" : null,
-      "foreign_table" : null
-    },
-    "flattenable" : "flatten",
-    "join_relation_type" : "MANY_TO_ONE"
-  } ],
-  "filter_condition" : "",
-  "partition_desc" : {
-    "partition_date_column" : "P_LINEORDER.LO_ORDERDATE",
-    "partition_date_start" : 0,
-    "partition_date_format" : "yyyy-MM-dd",
-    "partition_type" : "APPEND",
-    "partition_condition_builder" : 
"org.apache.kylin.metadata.model.PartitionDesc$DefaultPartitionConditionBuilder"
-  },
-  "capacity" : "MEDIUM",
-  "segment_config" : {
-    "auto_merge_enabled" : null,
-    "auto_merge_time_ranges" : null,
-    "volatile_range" : null,
-    "retention_range" : null,
-    "create_empty_segment_enabled" : false
-  },
-  "data_check_desc" : null,
-  "semantic_version" : 0,
-  "storage_type" : 0,
-  "model_type" : "BATCH",
-  "all_named_columns" : [ {
-    "id" : 0,
-    "name" : "LO_SHIPMODE",
-    "column" : "P_LINEORDER.LO_SHIPMODE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 1,
-    "name" : "LO_LINENUMBER",
-    "column" : "P_LINEORDER.LO_LINENUMBER",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 2,
-    "name" : "LO_ORDTOTALPRICE",
-    "column" : "P_LINEORDER.LO_ORDTOTALPRICE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 3,
-    "name" : "LO_SUPPLYCOST",
-    "column" : "P_LINEORDER.LO_SUPPLYCOST",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 4,
-    "name" : "LO_SUPPKEY",
-    "column" : "P_LINEORDER.LO_SUPPKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 5,
-    "name" : "LO_QUANTITY",
-    "column" : "P_LINEORDER.LO_QUANTITY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 6,
-    "name" : "LO_PARTKEY",
-    "column" : "P_LINEORDER.LO_PARTKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 7,
-    "name" : "LO_ORDERKEY",
-    "column" : "P_LINEORDER.LO_ORDERKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 8,
-    "name" : "LO_CUSTKEY",
-    "column" : "P_LINEORDER.LO_CUSTKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 9,
-    "name" : "LO_SHIPPRIOTITY",
-    "column" : "P_LINEORDER.LO_SHIPPRIOTITY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 10,
-    "name" : "LO_DISCOUNT",
-    "column" : "P_LINEORDER.LO_DISCOUNT",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 11,
-    "name" : "LO_ORDERPRIOTITY",
-    "column" : "P_LINEORDER.LO_ORDERPRIOTITY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 12,
-    "name" : "LO_ORDERDATE",
-    "column" : "P_LINEORDER.LO_ORDERDATE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 13,
-    "name" : "LO_REVENUE",
-    "column" : "P_LINEORDER.LO_REVENUE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 14,
-    "name" : "V_REVENUE",
-    "column" : "P_LINEORDER.V_REVENUE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 15,
-    "name" : "LO_COMMITDATE",
-    "column" : "P_LINEORDER.LO_COMMITDATE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 16,
-    "name" : "LO_EXTENDEDPRICE",
-    "column" : "P_LINEORDER.LO_EXTENDEDPRICE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 17,
-    "name" : "LO_TAX",
-    "column" : "P_LINEORDER.LO_TAX",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 18,
-    "name" : "D_WEEKNUMINYEAR",
-    "column" : "DATES.D_WEEKNUMINYEAR"
-  }, {
-    "id" : 19,
-    "name" : "D_LASTDAYINWEEKFL",
-    "column" : "DATES.D_LASTDAYINWEEKFL"
-  }, {
-    "id" : 20,
-    "name" : "D_LASTDAYINMONTHFL",
-    "column" : "DATES.D_LASTDAYINMONTHFL"
-  }, {
-    "id" : 21,
-    "name" : "D_DAYOFWEEK",
-    "column" : "DATES.D_DAYOFWEEK"
-  }, {
-    "id" : 22,
-    "name" : "D_MONTHNUMINYEAR",
-    "column" : "DATES.D_MONTHNUMINYEAR"
-  }, {
-    "id" : 23,
-    "name" : "D_YEARMONTHNUM",
-    "column" : "DATES.D_YEARMONTHNUM"
-  }, {
-    "id" : 24,
-    "name" : "D_YEARMONTH",
-    "column" : "DATES.D_YEARMONTH"
-  }, {
-    "id" : 25,
-    "name" : "D_DAYNUMINMONTH",
-    "column" : "DATES.D_DAYNUMINMONTH"
-  }, {
-    "id" : 26,
-    "name" : "D_SELLINGSEASON",
-    "column" : "DATES.D_SELLINGSEASON"
-  }, {
-    "id" : 27,
-    "name" : "D_WEEKDAYFL",
-    "column" : "DATES.D_WEEKDAYFL"
-  }, {
-    "id" : 28,
-    "name" : "D_YEAR",
-    "column" : "DATES.D_YEAR"
-  }, {
-    "id" : 29,
-    "name" : "D_HOLIDAYFL",
-    "column" : "DATES.D_HOLIDAYFL"
-  }, {
-    "id" : 30,
-    "name" : "D_DAYNUMINWEEK",
-    "column" : "DATES.D_DAYNUMINWEEK"
-  }, {
-    "id" : 31,
-    "name" : "D_DAYNUMINYEAR",
-    "column" : "DATES.D_DAYNUMINYEAR"
-  }, {
-    "id" : 32,
-    "name" : "D_DATE",
-    "column" : "DATES.D_DATE",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 33,
-    "name" : "D_MONTH",
-    "column" : "DATES.D_MONTH"
-  }, {
-    "id" : 34,
-    "name" : "D_DATEKEY",
-    "column" : "DATES.D_DATEKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 35,
-    "name" : "C_ADDRESS",
-    "column" : "CUSTOMER.C_ADDRESS"
-  }, {
-    "id" : 36,
-    "name" : "C_NATION",
-    "column" : "CUSTOMER.C_NATION"
-  }, {
-    "id" : 37,
-    "name" : "C_CITY",
-    "column" : "CUSTOMER.C_CITY"
-  }, {
-    "id" : 38,
-    "name" : "C_PHONE",
-    "column" : "CUSTOMER.C_PHONE"
-  }, {
-    "id" : 39,
-    "name" : "C_REGION",
-    "column" : "CUSTOMER.C_REGION"
-  }, {
-    "id" : 40,
-    "name" : "C_NAME",
-    "column" : "CUSTOMER.C_NAME",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 41,
-    "name" : "C_MKTSEGMENT",
-    "column" : "CUSTOMER.C_MKTSEGMENT"
-  }, {
-    "id" : 42,
-    "name" : "C_CUSTKEY",
-    "column" : "CUSTOMER.C_CUSTKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 43,
-    "name" : "P_PARTKEY",
-    "column" : "PART.P_PARTKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 44,
-    "name" : "P_CONTAINER",
-    "column" : "PART.P_CONTAINER"
-  }, {
-    "id" : 45,
-    "name" : "P_SIZE",
-    "column" : "PART.P_SIZE"
-  }, {
-    "id" : 46,
-    "name" : "P_NAME",
-    "column" : "PART.P_NAME",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 47,
-    "name" : "P_CATEGORY",
-    "column" : "PART.P_CATEGORY"
-  }, {
-    "id" : 48,
-    "name" : "P_TYPE",
-    "column" : "PART.P_TYPE"
-  }, {
-    "id" : 49,
-    "name" : "P_MFGR",
-    "column" : "PART.P_MFGR"
-  }, {
-    "id" : 50,
-    "name" : "P_BRAND",
-    "column" : "PART.P_BRAND"
-  }, {
-    "id" : 51,
-    "name" : "P_COLOR",
-    "column" : "PART.P_COLOR"
-  }, {
-    "id" : 52,
-    "name" : "S_ADDRESS",
-    "column" : "SUPPLIER.S_ADDRESS"
-  }, {
-    "id" : 53,
-    "name" : "S_NAME",
-    "column" : "SUPPLIER.S_NAME",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 54,
-    "name" : "S_NATION",
-    "column" : "SUPPLIER.S_NATION"
-  }, {
-    "id" : 55,
-    "name" : "S_SUPPKEY",
-    "column" : "SUPPLIER.S_SUPPKEY",
-    "status" : "DIMENSION"
-  }, {
-    "id" : 56,
-    "name" : "S_REGION",
-    "column" : "SUPPLIER.S_REGION"
-  }, {
-    "id" : 57,
-    "name" : "S_PHONE",
-    "column" : "SUPPLIER.S_PHONE"
-  }, {
-    "id" : 58,
-    "name" : "S_CITY",
-    "column" : "SUPPLIER.S_CITY"
-  } ],
-  "all_measures" : [ {
-    "name" : "COUNT_ALL",
-    "function" : {
-      "expression" : "COUNT",
-      "parameters" : [ {
-        "type" : "constant",
-        "value" : "1"
-      } ],
-      "returntype" : "bigint"
-    },
-    "column" : null,
-    "comment" : null,
-    "id" : 100000
-  } ],
-  "recommendations_count" : 0,
-  "computed_columns" : [ ],
-  "canvas" : {
-    "coordinate" : {
-      "P_LINEORDER" : {
-        "x" : 625.388895670573,
-        "y" : 272.38889058430993,
-        "width" : 220.0,
-        "height" : 200.0
-      },
-      "CUSTOMER" : {
-        "x" : 315.38889567057305,
-        "y" : 72.3888905843099,
-        "width" : 220.0,
-        "height" : 200.0
-      },
-      "DATES" : {
-        "x" : 275.38889567057294,
-        "y" : 556.8333350287544,
-        "width" : 220.0,
-        "height" : 200.0
-      },
-      "PART" : {
-        "x" : 774.2777845594618,
-        "y" : 605.7222239176433,
-        "width" : 220.0,
-        "height" : 200.0
-      },
-      "SUPPLIER" : {
-        "x" : 1055.388895670573,
-        "y" : 122.3888905843099,
-        "width" : 220.0,
-        "height" : 200.0
-      }
-    },
-    "zoom" : 9.0
-  },
-  "multi_partition_desc" : null,
-  "multi_partition_key_mapping" : null,
-  "fusion_id" : null
-}
\ No newline at end of file
diff --git a/dev-support/release-manager/standalone-docker/.gitignore 
b/dev-support/release-manager/standalone-docker/.gitignore
index 42fcf40e28..f080658f5a 100644
--- a/dev-support/release-manager/standalone-docker/.gitignore
+++ b/dev-support/release-manager/standalone-docker/.gitignore
@@ -1 +1 @@
-all_in_one/package/*
+all-in-one/package/*
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile 
b/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile
new file mode 100644
index 0000000000..a475825362
--- /dev/null
+++ b/dev-support/release-manager/standalone-docker/all-in-one/Dockerfile
@@ -0,0 +1,70 @@
+# syntax=docker/dockerfile:1
+FROM ubuntu:22.04
+RUN apt update
+RUN apt install -y openjdk-8-jdk vim less net-tools lsof ssh curl
+ENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64
+
+# ssh config — NOTE(review): key pair generated at build time is baked into the image and shared by all containers; acceptable for a local sandbox, not for published images
+COPY conf/ssh/sshd.conf /etc/ssh/sshd_config.d/
+COPY conf/ssh/ssh.conf /etc/ssh/ssh_config.d/
+RUN ssh-keygen -t ed25519 -P '' -f ~/.ssh/id_ed25519
+RUN cat ~/.ssh/id_ed25519.pub > ~/.ssh/authorized_keys
+RUN chmod 0400 ~/.ssh/authorized_keys
+
+# install mysql
+ADD scripts/install-mysql.sh /opt/
+RUN chmod 0755 /opt/install-mysql.sh
+RUN /bin/bash /opt/install-mysql.sh
+
+# install hadoop
+ENV HADOOP_VERSION=3.2.4
+ENV HIVE_VERSION=3.1.3
+ENV ZOOKEEPER_VERSION=3.7.2
+
+ENV HADOOP_HOME=/opt/hadoop-$HADOOP_VERSION
+ENV HIVE_HOME=/opt/apache-hive-$HIVE_VERSION-bin
+ENV ZOOKEEPER_HOME=/opt/apache-zookeeper-$ZOOKEEPER_VERSION-bin
+
+WORKDIR /opt
+ADD 
https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
 /opt/
+ADD 
https://archive.apache.org/dist/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz
 /opt/
+ADD 
https://archive.apache.org/dist/zookeeper/zookeeper-$ZOOKEEPER_VERSION/apache-zookeeper-$ZOOKEEPER_VERSION-bin.tar.gz
 /opt/
+ADD 
https://repo1.maven.org/maven2/com/mysql/mysql-connector-j/8.0.33/mysql-connector-j-8.0.33.jar
 /opt/
+RUN tar -zxf /opt/hadoop-$HADOOP_VERSION.tar.gz
+RUN tar -zxf /opt/apache-hive-$HIVE_VERSION-bin.tar.gz
+RUN tar -zxf /opt/apache-zookeeper-$ZOOKEEPER_VERSION-bin.tar.gz
+RUN rm -f /opt/hadoop-$HADOOP_VERSION.tar.gz
+RUN rm -f /opt/apache-hive-$HIVE_VERSION-bin.tar.gz
+RUN rm -f /opt/apache-zookeeper-$ZOOKEEPER_VERSION-bin.tar.gz
+
+RUN mkdir -p /data/hadoop
+RUN mkdir -p /data/zookeeper
+COPY scripts/start-historyserver.sh $HADOOP_HOME/sbin/
+RUN chmod 0755 $HADOOP_HOME/sbin/start-historyserver.sh
+COPY conf/hadoop/* $HADOOP_HOME/etc/hadoop/
+COPY scripts/start-hivemetastore.sh $HIVE_HOME/bin/
+COPY scripts/start-hiveserver2.sh $HIVE_HOME/bin/
+RUN chmod 0755 $HIVE_HOME/bin/start-hivemetastore.sh
+RUN chmod 0755 $HIVE_HOME/bin/start-hiveserver2.sh
+COPY conf/hive/* $HIVE_HOME/conf/
+COPY conf/zookeeper/* $ZOOKEEPER_HOME/conf/
+ENV 
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin
+
+# hive lib extra
+RUN rm -f $HIVE_HOME/lib/guava-*.jar
+RUN cp $HADOOP_HOME/share/hadoop/common/lib/guava-*.jar $HIVE_HOME/lib/
+RUN cp /opt/mysql-connector-j-8.0.33.jar $HIVE_HOME/lib/
+
+# install apache kylin
+RUN mkdir -p /home/kylin
+ADD package/apache-kylin-5.0.0-GA-bin.tar.gz /home/kylin
+ENV KYLIN_HOME=/home/kylin/apache-kylin-5.0.0-GA-bin
+RUN cp /opt/mysql-connector-j-8.0.33.jar $KYLIN_HOME/lib/ext/
+COPY conf/kylin/kylin.properties.override $KYLIN_HOME/conf/
+
+# startup script
+COPY scripts/entrypoint.sh /home/kylin/
+RUN chmod 0755 /home/kylin/entrypoint.sh
+
+WORKDIR /home/kylin
+CMD ["/bin/bash", "entrypoint.sh"]
diff --git a/dev-support/release-manager/standalone-docker/all_in_one/README.md 
b/dev-support/release-manager/standalone-docker/all-in-one/README.md
similarity index 100%
rename from dev-support/release-manager/standalone-docker/all_in_one/README.md
rename to dev-support/release-manager/standalone-docker/all-in-one/README.md
diff --git a/dev-support/release-manager/standalone-docker/all-in-one/build.sh 
b/dev-support/release-manager/standalone-docker/all-in-one/build.sh
new file mode 100755
index 0000000000..9da896012b
--- /dev/null
+++ b/dev-support/release-manager/standalone-docker/all-in-one/build.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+TAG=5.0.0-GA
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+cd "${DIR}" || exit
+echo "build image in dir ${DIR}"
+
+echo "package kylin in local for building image"
+if [[ ! -d ${DIR}/package/ ]]; then
+    mkdir -p ${DIR}/package/
+fi
+
+# The official apache kylin package has no Spark binary included, prepare 
manually with following steps:
+# 1. Download apache kylin binary and extract
+# 2. Execute sbin/download-spark-user.sh and should have a new spark folder at 
the root of kylin dir
+# 3. Re-compress kylin folder and put it to the package dir for Dockerfile use
+#
+# wget 
https://archive.apache.org/dist/kylin/apache-kylin-5.0.0-GA/apache-kylin-5.0.0-GA-bin.tar.gz
 -P ${DIR}/package/
+# tar zxf apache-kylin-5.0.0-GA-bin.tar.gz
+# cd apache-kylin-5.0.0-GA-bin
+# bash sbin/download-spark-user.sh
+# tar -czf apache-kylin-5.0.0-GA-bin.tar.gz apache-kylin-5.0.0-GA-bin
+# Notice - For mac tar command use: tar czf apache-kylin-5.0.0-GA-bin.tar.gz 
--no-mac-metadata apache-kylin-5.0.0-GA-bin
+# to avoid AppleDouble format hidden files inside the compressed file
+
+echo "start to build kylin standalone docker image"
+docker build . -t apachekylin/apache-kylin-standalone:${TAG}
+
+BUILD_RESULT=$?
+if [ "$BUILD_RESULT" != "0" ]; then
+  echo "Image build failed, please check"
+  exit 1
+fi
+echo "Image build succeeded"
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/capacity-scheduler.xml
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/capacity-scheduler.xml
new file mode 100644
index 0000000000..a9ae992cfd
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/capacity-scheduler.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>yarn.scheduler.capacity.default.minimum-user-limit-percent</name>
+        <value>100</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+        <value>0.5</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.maximum-applications</name>
+        <value>10000</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.node-locality-delay</name>
+        <value>40</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.resource-calculator</name>
+        
<value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.root.capacity</name>
+        <value>100</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.root.default.capacity</name>
+        <value>100</value>
+    </property>
+    <property>
+        
<name>yarn.scheduler.capacity.root.default.maximum-am-resource-percent</name>
+        <value>0.5</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+        <value>100</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.root.default.state</name>
+        <value>RUNNING</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+        <value>1</value>
+    </property>
+    <property>
+        <name>yarn.scheduler.capacity.root.queues</name>
+        <value>default</value>
+    </property>
+</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/core-site.xml
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/core-site.xml
similarity index 90%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/core-site.xml
rename to 
dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/core-site.xml
index 6fe6404e5c..ad356d568d 100644
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/core-site.xml
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/core-site.xml
@@ -20,10 +20,13 @@
     <property>
         <name>hadoop.tmp.dir</name>
         <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
     </property>
     <property>
         <name>fs.defaultFS</name>
         <value>hdfs://localhost:9000</value>
     </property>
+    <property>
+        <name>hadoop.http.staticuser.user</name>
+        <value>root</value>
+    </property>
 </configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/hadoop-env.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/hadoop-env.sh
new file mode 100644
index 0000000000..e8c5b38a9a
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/hadoop-env.sh
@@ -0,0 +1,423 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hadoop-specific environment variables here.
+
+##
+## THIS FILE ACTS AS THE MASTER FILE FOR ALL HADOOP PROJECTS.
+## SETTINGS HERE WILL BE READ BY ALL HADOOP COMMANDS.  THEREFORE,
+## ONE CAN USE THIS FILE TO SET YARN, HDFS, AND MAPREDUCE
+## CONFIGURATION OPTIONS INSTEAD OF xxx-env.sh.
+##
+## Precedence rules:
+##
+## {yarn-env.sh|hdfs-env.sh} > hadoop-env.sh > hard-coded defaults
+##
+## {YARN_xyz|HDFS_xyz} > HADOOP_xyz > hard-coded defaults
+##
+
+# Many of the options here are built from the perspective that users
+# may want to provide OVERWRITING values on the command line.
+# For example:
+#
+#  JAVA_HOME=/usr/java/testing hdfs dfs -ls
+#
+# Therefore, the vast majority (BUT NOT ALL!) of these defaults
+# are configured for substitution and not append.  If append
+# is preferable, modify this file accordingly.
+
+###
+# Generic settings for HADOOP
+###
+
+# Technically, the only required environment variable is JAVA_HOME.
+# All others are optional.  However, the defaults are probably not
+# preferred.  Many sites configure these options outside of Hadoop,
+# such as in /etc/profile.d
+
+# The java implementation to use. By default, this environment
+# variable is REQUIRED on ALL platforms except OS X!
+export JAVA_HOME="/usr/lib/jvm/java-8-openjdk-amd64"
+
+# Location of Hadoop.  By default, Hadoop will attempt to determine
+# this location based upon its execution path.
+# export HADOOP_HOME=
+
+# Location of Hadoop's configuration information.  i.e., where this
+# file is living. If this is not defined, Hadoop will attempt to
+# locate it based upon its execution path.
+#
+# NOTE: It is recommend that this variable not be set here but in
+# /etc/profile.d or equivalent.  Some options (such as
+# --config) may react strangely otherwise.
+#
+# export HADOOP_CONF_DIR=${HADOOP_HOME}/etc/hadoop
+
+# The maximum amount of heap to use (Java -Xmx).  If no unit
+# is provided, it will be converted to MB.  Daemons will
+# prefer any Xmx setting in their respective _OPT variable.
+# There is no default; the JVM will autoscale based upon machine
+# memory size.
+# export HADOOP_HEAPSIZE_MAX=
+
+# The minimum amount of heap to use (Java -Xms).  If no unit
+# is provided, it will be converted to MB.  Daemons will
+# prefer any Xms setting in their respective _OPT variable.
+# There is no default; the JVM will autoscale based upon machine
+# memory size.
+# export HADOOP_HEAPSIZE_MIN=
+
+# Enable extra debugging of Hadoop's JAAS binding, used to set up
+# Kerberos security.
+# export HADOOP_JAAS_DEBUG=true
+
+# Extra Java runtime options for all Hadoop commands. We don't support
+# IPv6 yet/still, so by default the preference is set to IPv4.
+# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
+# For Kerberos debugging, an extended option set logs more information
+# export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true 
-Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
+
+# Some parts of the shell code may do special things dependent upon
+# the operating system.  We have to set this here. See the next
+# section as to why....
+export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
+
+# Extra Java runtime options for some Hadoop commands
+# and clients (i.e., hdfs dfs -blah).  These get appended to HADOOP_OPTS for
+# such commands.  In most cases, # this should be left empty and
+# let users supply it on the command line.
+# export HADOOP_CLIENT_OPTS=""
+
+#
+# A note about classpaths.
+#
+# By default, Apache Hadoop overrides Java's CLASSPATH
+# environment variable.  It is configured such
+# that it starts out blank with new entries added after passing
+# a series of checks (file/dir exists, not already listed aka
+# de-deduplication).  During de-deduplication, wildcards and/or
+# directories are *NOT* expanded to keep it simple. Therefore,
+# if the computed classpath has two specific mentions of
+# awesome-methods-1.0.jar, only the first one added will be seen.
+# If two directories are in the classpath that both contain
+# awesome-methods-1.0.jar, then Java will pick up both versions.
+
+# An additional, custom CLASSPATH. Site-wide configs should be
+# handled via the shellprofile functionality, utilizing the
+# hadoop_add_classpath function for greater control and much
+# harder for apps/end-users to accidentally override.
+# Similarly, end users should utilize ${HOME}/.hadooprc .
+# This variable should ideally only be used as a short-cut,
+# interactive way for temporary additions on the command line.
+# export HADOOP_CLASSPATH="/some/cool/path/on/your/machine"
+
+# Should HADOOP_CLASSPATH be first in the official CLASSPATH?
+# export HADOOP_USER_CLASSPATH_FIRST="yes"
+
+# If HADOOP_USE_CLIENT_CLASSLOADER is set, the classpath along
+# with the main jar are handled by a separate isolated
+# client classloader when 'hadoop jar', 'yarn jar', or 'mapred job'
+# is utilized. If it is set, HADOOP_CLASSPATH and
+# HADOOP_USER_CLASSPATH_FIRST are ignored.
+# export HADOOP_USE_CLIENT_CLASSLOADER=true
+
+# HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES overrides the default definition of
+# system classes for the client classloader when HADOOP_USE_CLIENT_CLASSLOADER
+# is enabled. Names ending in '.' (period) are treated as package names, and
+# names starting with a '-' are treated as negative matches. For example,
+# export 
HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
+
+# Enable optional, bundled Hadoop features
+# This is a comma delimited list.  It may NOT be overridden via .hadooprc
+# Entries may be added/removed as needed.
+# export 
HADOOP_OPTIONAL_TOOLS="hadoop-kafka,hadoop-openstack,hadoop-azure-datalake,hadoop-aliyun,hadoop-aws,hadoop-azure"
+
+###
+# Options for remote shell connectivity
+###
+
+# There are some optional components of hadoop that allow for
+# command and control of remote hosts.  For example,
+# start-dfs.sh will attempt to bring up all NNs, DNS, etc.
+
+# Options to pass to SSH when one of the "log into a host and
+# start/stop daemons" scripts is executed
+# export HADOOP_SSH_OPTS="-o BatchMode=yes -o StrictHostKeyChecking=no -o 
ConnectTimeout=10s"
+
+# The built-in ssh handler will limit itself to 10 simultaneous connections.
+# For pdsh users, this sets the fanout size ( -f )
+# Change this to increase/decrease as necessary.
+# export HADOOP_SSH_PARALLEL=10
+
+# Filename which contains all of the hosts for any remote execution
+# helper scripts # such as workers.sh, start-dfs.sh, etc.
+# export HADOOP_WORKERS="${HADOOP_CONF_DIR}/workers"
+
+###
+# Options for all daemons
+###
+#
+
+#
+# Many options may also be specified as Java properties.  It is
+# very common, and in many cases, desirable, to hard-set these
+# in daemon _OPTS variables.  Where applicable, the appropriate
+# Java property is also identified.  Note that many are re-used
+# or set differently in certain contexts (e.g., secure vs
+# non-secure)
+#
+
+# Where (primarily) daemon log files are stored.
+# ${HADOOP_HOME}/logs by default.
+# Java property: hadoop.log.dir
+# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+
+# A string representing this instance of hadoop. $USER by default.
+# This is used in writing log and pid files, so keep that in mind!
+# Java property: hadoop.id.str
+# export HADOOP_IDENT_STRING=$USER
+
+# How many seconds to pause after stopping a daemon
+# export HADOOP_STOP_TIMEOUT=5
+
+# Where pid files are stored.  /tmp by default.
+# export HADOOP_PID_DIR=/tmp
+
+# Default log4j setting for interactive commands
+# Java property: hadoop.root.logger
+# export HADOOP_ROOT_LOGGER=INFO,console
+
+# Default log4j setting for daemons spawned explicitly by
+# --daemon option of hadoop, hdfs, mapred and yarn command.
+# Java property: hadoop.root.logger
+# export HADOOP_DAEMON_ROOT_LOGGER=DEBUG,RFA
+
+# Default log level and output location for security-related messages.
+# You will almost certainly want to change this on a per-daemon basis via
+# the Java property (i.e., -Dhadoop.security.logger=foo). (Note that the
+# defaults for the NN and 2NN override this by default.)
+# Java property: hadoop.security.logger
+# export HADOOP_SECURITY_LOGGER=INFO,NullAppender
+
+# Default process priority level
+# Note that sub-processes will also run at this level!
+# export HADOOP_NICENESS=0
+
+# Default name for the service level authorization file
+# Java property: hadoop.policy.file
+# export HADOOP_POLICYFILE="hadoop-policy.xml"
+
+#
+# NOTE: this is not used by default!  <-----
+# You can define variables right here and then re-use them later on.
+# For example, it is common to use the same garbage collection settings
+# for all the daemons.  So one could define:
+#
+# export HADOOP_GC_SETTINGS="-verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps"
+#
+# .. and then use it as per the b option under the namenode.
+
+###
+# Secure/privileged execution
+###
+
+#
+# Out of the box, Hadoop uses jsvc from Apache Commons to launch daemons
+# on privileged ports.  This functionality can be replaced by providing
+# custom functions.  See hadoop-functions.sh for more information.
+#
+
+# The jsvc implementation to use. Jsvc is required to run secure datanodes
+# that bind to privileged ports to provide authentication of data transfer
+# protocol.  Jsvc is not required if SASL is configured for authentication of
+# data transfer protocol using non-privileged ports.
+# export JSVC_HOME=/usr/bin
+
+#
+# This directory contains pids for secure and privileged processes.
+#export HADOOP_SECURE_PID_DIR=${HADOOP_PID_DIR}
+
+#
+# This directory contains the logs for secure and privileged processes.
+# Java property: hadoop.log.dir
+# export HADOOP_SECURE_LOG=${HADOOP_LOG_DIR}
+
+#
+# When running a secure daemon, the default value of HADOOP_IDENT_STRING
+# ends up being a bit bogus.  Therefore, by default, the code will
+# replace HADOOP_IDENT_STRING with HADOOP_xx_SECURE_USER.  If one wants
+# to keep HADOOP_IDENT_STRING untouched, then uncomment this line.
+# export HADOOP_SECURE_IDENT_PRESERVE="true"
+
+###
+# NameNode specific parameters
+###
+
+# Default log level and output location for file system related change
+# messages. For non-namenode daemons, the Java property must be set in
+# the appropriate _OPTS if one wants something other than INFO,NullAppender
+# Java property: hdfs.audit.logger
+# export HDFS_AUDIT_LOGGER=INFO,NullAppender
+
+# Specify the JVM options to be used when starting the NameNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# a) Set JMX options
+# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true 
-Dcom.sun.management.jmxremote.authenticate=false 
-Dcom.sun.management.jmxremote.ssl=false 
-Dcom.sun.management.jmxremote.port=1026"
+#
+# b) Set garbage collection logs
+# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} 
-Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+#
+# c) ... or set them directly
+# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails 
-XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps 
-Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
+
+# this is the default:
+# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
+
+###
+# SecondaryNameNode specific parameters
+###
+# Specify the JVM options to be used when starting the SecondaryNameNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# This is the default:
+# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
+
+###
+# DataNode specific parameters
+###
+# Specify the JVM options to be used when starting the DataNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# This is the default:
+# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
+
+# On secure datanodes, user to run the datanode as after dropping privileges.
+# This **MUST** be uncommented to enable secure HDFS if using privileged ports
+# to provide authentication of data transfer protocol.  This **MUST NOT** be
+# defined if SASL is configured for authentication of data transfer protocol
+# using non-privileged ports.
+# This will replace the hadoop.id.str Java property in secure mode.
+# export HDFS_DATANODE_SECURE_USER=hdfs
+
+# Supplemental options for secure datanodes
+# By default, Hadoop uses jsvc which needs to know to launch a
+# server jvm.
+# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
+
+###
+# NFS3 Gateway specific parameters
+###
+# Specify the JVM options to be used when starting the NFS3 Gateway.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_NFS3_OPTS=""
+
+# Specify the JVM options to be used when starting the Hadoop portmapper.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_PORTMAP_OPTS="-Xmx512m"
+
+# Supplemental options for priviliged gateways
+# By default, Hadoop uses jsvc which needs to know to launch a
+# server jvm.
+# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"
+
+# On privileged gateways, user to run the gateway as after dropping privileges
+# This will replace the hadoop.id.str Java property in secure mode.
+# export HDFS_NFS3_SECURE_USER=nfsserver
+
+###
+# ZKFailoverController specific parameters
+###
+# Specify the JVM options to be used when starting the ZKFailoverController.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_ZKFC_OPTS=""
+
+###
+# QuorumJournalNode specific parameters
+###
+# Specify the JVM options to be used when starting the QuorumJournalNode.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_JOURNALNODE_OPTS=""
+
+###
+# HDFS Balancer specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Balancer.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_BALANCER_OPTS=""
+
+###
+# HDFS Mover specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Mover.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_MOVER_OPTS=""
+
+###
+# Router-based HDFS Federation specific parameters
+# Specify the JVM options to be used when starting the RBF Routers.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_DFSROUTER_OPTS=""
+
+###
+# HDFS StorageContainerManager specific parameters
+###
+# Specify the JVM options to be used when starting the HDFS Storage Container 
Manager.
+# These options will be appended to the options specified as HADOOP_OPTS
+# and therefore may override any similar flags set in HADOOP_OPTS
+#
+# export HDFS_STORAGECONTAINERMANAGER_OPTS=""
+
+###
+# Advanced Users Only!
+###
+
+#
+# When building Hadoop, one can add the class paths to the commands
+# via this special env var:
+# export HADOOP_ENABLE_BUILD_PATHS="true"
+
+#
+# To prevent accidents, shell commands be (superficially) locked
+# to only allow certain users to execute certain subcommands.
+# It uses the format of (command)_(subcommand)_USER.
+#
+# For example, to limit who can execute the namenode command,
+# export HDFS_NAMENODE_USER=hdfs
+
+export HDFS_NAMENODE_USER="root"
+export HDFS_DATANODE_USER="root"
+export HDFS_SECONDARYNAMENODE_USER="root"
+export YARN_RESOURCEMANAGER_USER="root"
+export YARN_NODEMANAGER_USER="root"
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/hdfs-site.xml
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/hdfs-site.xml
similarity index 78%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/hdfs-site.xml
rename to 
dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/hdfs-site.xml
index 77b9460eb0..76fc9c68fa 100644
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/hdfs-site.xml
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/hdfs-site.xml
@@ -21,12 +21,4 @@
         <name>dfs.replication</name>
         <value>1</value>
     </property>
-    <property>
-        <name>dfs.namenode.name.dir</name>
-        <value>/data/hadoop/dfs/name</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir</name>
-        <value>/data/hadoop/dfs/data</value>
-    </property>
 </configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/core-site.xml
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/mapred-site.xml
similarity index 77%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/core-site.xml
rename to 
dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/mapred-site.xml
index 2e7e4a5671..ab81b95196 100644
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/core-site.xml
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/mapred-site.xml
@@ -18,12 +18,11 @@
 
 <configuration>
     <property>
-        <name>hadoop.tmp.dir</name>
-        <value>/data/hadoop</value>
-        <description>Abase for other temporary directories.</description>
+        <name>mapreduce.framework.name</name>
+        <value>yarn</value>
     </property>
     <property>
-        <name>fs.defaultFS</name>
-        <value>hdfs://kylin5-machine:9000</value>
+        <name>mapreduce.application.classpath</name>
+        
<value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*:$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
     </property>
 </configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/yarn-site.xml
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/yarn-site.xml
similarity index 61%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/yarn-site.xml
rename to 
dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/yarn-site.xml
index 809688d8fa..7d06699b94 100644
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/yarn-site.xml
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hadoop/yarn-site.xml
@@ -1,4 +1,5 @@
-<?xml version="1.0"?>
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
@@ -13,53 +14,48 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 <configuration>
-
-    <property>
-        <name>yarn.nodemanager.hostname</name>
-        <value>kylin5-machine</value>
-    </property>
     <property>
-        <name>yarn.nodemanager.hostname</name>
-        <value>kylin5-machine</value>
+        
<name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
+        <value>98</value>
     </property>
     <property>
-        <name>yarn.resourcemanager.scheduler.class</name>
-        
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
+        <name>yarn.nodemanager.aux-services</name>
+        <value>mapreduce_shuffle</value>
     </property>
     <property>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-        <value>256</value>
+        <name>yarn.nodemanager.env-whitelist</name>
+        
<value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,PATH,LANG,TZ,HADOOP_MAPRED_HOME</value>
     </property>
     <property>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-        <value>6144</value>
+        <name>yarn.resourcemanager.scheduler.class</name>
+        
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
     </property>
     <property>
-        <name>yarn.nodemanager.vmem-check-enabled</name>
-        <value>false</value>
+        <name>yarn.log-aggregation-enable</name>
+        <value>true</value>
     </property>
     <property>
-        <name>yarn.nodemanager.resource.memory-mb</name>
-        <value>14336</value>
+        <name>yarn.log.dir</name>
+        <value>/data/hadoop</value>
     </property>
     <property>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-        <value>7</value>
+        <name>yarn.log.server.url</name>
+        <value>http://localhost:19888/jobhistory/logs</value>
     </property>
     <property>
-        <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-        <value>0.6</value>
+        <name>yarn.scheduler.minimum-allocation-vcores</name>
+        <value>1</value>
     </property>
     <property>
-        
<name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-        <value>98.5</value>
+        <name>yarn.scheduler.maximum-allocation-vcores</name>
+        <value>1</value>
     </property>
     <property>
-        <name>yarn.nodemanager.aux-services</name>
-        <value>mapreduce_shuffle</value>
+        <name>yarn.scheduler.minimum-allocation-mb</name>
+        <value>256</value>
     </property>
     <property>
-        <name>yarn.resourcemanager.zk-address</name>
-        <value>localhost:2181</value>
+        <name>yarn.scheduler.maximum-allocation-mb</name>
+        <value>4096</value>
     </property>
-</configuration>
\ No newline at end of file
+</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/hive/hive-env.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hive/hive-env.sh
new file mode 100644
index 0000000000..b86c570d7f
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hive/hive-env.sh
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI etc.) is available via the environment
+# variable SERVICE
+
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+# if [ "$SERVICE" = "cli" ]; then
+#   if [ -z "$DEBUG" ]; then
+#     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m 
-XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC 
-XX:-UseGCOverheadLimit"
+#   else
+#     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m 
-XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+#   fi
+# fi
+
+# The heap size of the jvm started by hive shell script can be controlled via:
+#
+# export HADOOP_HEAPSIZE=1024
+#
+# Larger heap size may be required when running queries over large number of 
files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size 
would also be
+# appropriate for hive server.
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=/opt/hadoop-3.2.4
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR=/opt/apache-hive-3.1.3-bin/conf
+
+# Folder containing extra libraries required for hive compilation/execution 
can be controlled by:
+# export HIVE_AUX_JARS_PATH=
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/hive/hive-site.xml
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hive/hive-site.xml
new file mode 100644
index 0000000000..c81b18fefc
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/hive/hive-site.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+    <property>
+        <name>hive.metastore.warehouse.dir</name>
+        <value>/user/hive3/warehouse</value>
+    </property>
+    <property>
+        <name>hive.txn.manager</name>
+        <value>org.apache.hadoop.hive.ql.lockmgr.DbTxnManager</value>
+    </property>
+    <property>
+        <name>hive.support.concurrency</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>hive.server2.enable.doAs</name>
+        <value>false</value>
+    </property>
+    <property>
+        <name>hive.server2.thrift.bind.host</name>
+        <value>127.0.0.1</value>
+    </property>
+    <property>
+        <name>hive.metastore.uris</name>
+        <value>thrift://127.0.0.1:9083</value>
+    </property>
+    <property>
+        <name>hive.metastore.db.type</name>
+        <value>mysql</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionDriverName</name>
+        <value>com.mysql.cj.jdbc.Driver</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionURL</name>
+        
<value>jdbc:mysql://127.0.0.1:3306/hive3?useSSL=false&amp;allowPublicKeyRetrieval=true&amp;characterEncoding=UTF-8</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionUserName</name>
+        <value>root</value>
+    </property>
+    <property>
+        <name>javax.jdo.option.ConnectionPassword</name>
+        <value>123456</value>
+    </property>
+    <property>
+        <name>hive.metastore.event.db.notification.api.auth</name>
+        <value>false</value>
+    </property>
+</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/kylin/kylin.properties.override
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/kylin/kylin.properties.override
similarity index 53%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/conf/kylin/kylin.properties.override
rename to 
dev-support/release-manager/standalone-docker/all-in-one/conf/kylin/kylin.properties.override
index 5c6cfc0a75..44b387cf30 100644
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/kylin/kylin.properties.override
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/kylin/kylin.properties.override
@@ -15,61 +15,63 @@
 # limitations under the License.
 #
 
-# The http session will timeout in 3 hours
-spring.session.store-type=JDBC
-spring.session.timeout=10800
-
-# Turn off query cache
-kylin.query.cache-enabled=false
-
 server.port=7070
-kylin.query.init-sparder-async=false
-
-kylin.env.apache-hadoop-conf-dir=/opt/hadoop-3.2.1/etc/hadoop
-kylin.env.apache-hive-conf-dir=/opt/apache-hive-3.1.2-bin/conf
+kylin.env.apache-hadoop-conf-dir=/opt/hadoop-3.2.4/etc/hadoop
+kylin.env.apache-hive-conf-dir=/opt/apache-hive-3.1.3-bin/conf
 
-# The metastore connection information
-kylin.metadata.url=kylin@jdbc,driverClassName=com.mysql.jdbc.Driver,url=jdbc:mysql://localhost:3306/kylin?useUnicode=true&characterEncoding=utf8,username=root,password=123456,maxTotal=50,maxIdle=8
-
-kylin.env.zookeeper-connect-string=localhost:2181
+kylin.metadata.url=kylin@jdbc,driverClassName=com.mysql.jdbc.Driver,url=jdbc:mysql://127.0.0.1:3306/kylin?useSSL=false&allowPublicKeyRetrieval=true&useUnicode=true&characterEncoding=utf8,username=root,password=123456,maxTotal=50,maxIdle=8
+kylin.env.zookeeper-connect-string=127.0.0.1:2181
 kylin.env.hdfs-working-dir=/kylin
 
-# The yarn resource used by query engine(spark session)
+# Query
 kylin.storage.columnar.spark-conf.spark.driver.memory=512M
 kylin.storage.columnar.spark-conf.spark.driver.memoryOverhead=256M
+kylin.storage.columnar.spark-conf.spark.memory.offHeap.enabled=true
+kylin.storage.columnar.spark-conf.spark.memory.offHeap.size=1g
 kylin.storage.columnar.spark-conf.spark.executor.cores=1
 kylin.storage.columnar.spark-conf.spark.executor.instances=1
-kylin.storage.columnar.spark-conf.spark.executor.memory=2048M
-kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=256M
+kylin.storage.columnar.spark-conf.spark.executor.memory=1024M
+kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=512M
+kylin.storage.columnar.spark-conf.spark.driver.host=127.0.0.1
 
-# The yarn resource used by build engine(spark session)
+# Build
 kylin.engine.driver-memory-strategy=2,500
 kylin.engine.spark-conf.spark.driver.memory=512M
 kylin.engine.spark-conf.spark.driver.memoryOverhead=256M
+kylin.engine.spark-conf.spark.memory.offHeap.enabled=true
+kylin.engine.spark-conf.spark.memory.offHeap.size=1g
 kylin.engine.spark-conf.spark.executor.cores=1
 kylin.engine.spark-conf.spark.executor.instances=1
-kylin.engine.spark-conf.spark.executor.memory=2048M
+kylin.engine.spark-conf.spark.executor.memory=1024M
 kylin.engine.spark-conf.spark.executor.memoryOverhead=512M
+kylin.engine.spark-conf.spark.driver.host=127.0.0.1
 
-# The yarn resource used by query engine(spark session) Async Query
+# Async Query
 kylin.query.async-query.spark-conf.spark.executor.cores=1
 kylin.query.async-query.spark-conf.spark.driver.memory=1024M
 kylin.query.async-query.spark-conf.spark.executor.memory=1024M
 kylin.query.async-query.spark-conf.spark.executor.instances=1
 kylin.query.async-query.spark-conf.spark.executor.memoryOverhead=512M
 
-# Turn off the password force reset function
+# Other
 kylin.metadata.random-admin-password.enabled=false
-
 kylin.query.engine.push-down.enable-prepare-statement-with-params=true
 kylin.query.calcite.extras-props.FUN=standard,oracle
 kylin.circuit-breaker.threshold.project=500
 kylin.engine.resource-request-over-limit-proportion=3.0
 
-# Kylin can only run two jobs at the same time
-kylin.job.max-concurrent-jobs=2
-
-# If this switch is turned on, Kylin will check node's available memory, if 
remained
-# memory is not fulfilled, job will be hanged.
-# In some cases, the Kylin will refuse to start any build job by this check 
logic.
+# Disable job memory acquire limit
 kylin.job.auto-set-concurrent-jobs=false
+
+# ================ kylin5 extra config ================
+
+kylin.storage.columnar.spark-conf.spark.broadcast.autoClean.enabled=false
+kylin.storage.columnar.spark-conf.spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs.endpoint=hdfs://127.0.0.1:9000/
+kylin.storage.columnar.spark-conf.spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.libhdfs3_conf=${KYLIN_HOME}/hadoop_conf/hdfs-site.xml
+kylin.storage.columnar.spark-conf.spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs_cache.max_size=1Gi
+
+kylin.engine.spark-conf.spark.broadcast.autoClean.enabled=false
+kylin.engine.spark-conf.spark.gluten.sql.columnar.backend.ch.runtime_config.storage_configuration.disks.hdfs.endpoint=hdfs://127.0.0.1:9000/
+kylin.engine.spark-conf.spark.gluten.sql.columnar.backend.ch.runtime_config.hdfs.libhdfs3_conf=${KYLIN_HOME}/hadoop_conf/hdfs-site.xml
+
+kylin.storage.default-storage-type=3
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/ssh/ssh.conf 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/ssh/ssh.conf
new file mode 100644
index 0000000000..c06c8efabc
--- /dev/null
+++ b/dev-support/release-manager/standalone-docker/all-in-one/conf/ssh/ssh.conf
@@ -0,0 +1,2 @@
+Host *
+       StrictHostKeyChecking no
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/ssh/sshd.conf 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/ssh/sshd.conf
new file mode 100644
index 0000000000..ceca6913ee
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/ssh/sshd.conf
@@ -0,0 +1 @@
+ListenAddress 0.0.0.0
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/conf/zookeeper/zoo.cfg
 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/zookeeper/zoo.cfg
new file mode 100644
index 0000000000..f7aad4b05e
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/conf/zookeeper/zoo.cfg
@@ -0,0 +1,36 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# example sakes.
+dataDir=/data/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+# the maximum number of client connections.
+# increase this if you need to handle more clients
+#maxClientCnxns=60
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
+
+## Metrics Providers
+#
+# https://prometheus.io Metrics Exporter
+#metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider
+#metricsProvider.httpPort=7000
+#metricsProvider.exportJvmInfo=true
+
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/dev-docker/Dockerfile_kylin_dev
 
b/dev-support/release-manager/standalone-docker/all-in-one/dev-docker/Dockerfile_kylin_dev
similarity index 100%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/dev-docker/Dockerfile_kylin_dev
rename to 
dev-support/release-manager/standalone-docker/all-in-one/dev-docker/Dockerfile_kylin_dev
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/dev-docker/build_and_run.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/dev-docker/build_and_run.sh
similarity index 100%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/dev-docker/build_and_run.sh
rename to 
dev-support/release-manager/standalone-docker/all-in-one/dev-docker/build_and_run.sh
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/dev-docker/entrypoint-dev.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/dev-docker/entrypoint-dev.sh
similarity index 100%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/dev-docker/entrypoint-dev.sh
rename to 
dev-support/release-manager/standalone-docker/all-in-one/dev-docker/entrypoint-dev.sh
diff --git a/dev-support/release-manager/standalone-docker/all-in-one/run.sh 
b/dev-support/release-manager/standalone-docker/all-in-one/run.sh
new file mode 100755
index 0000000000..755dc67993
--- /dev/null
+++ b/dev-support/release-manager/standalone-docker/all-in-one/run.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+TAG=5.0.0-GA
+
+docker run -d \
+    --name Kylin5-Machine \
+    --hostname localhost \
+    -e TZ=UTC \
+    -m 10G \
+    -p 7070:7070 \
+    -p 8088:8088 \
+    -p 9870:9870 \
+    -p 8032:8032 \
+    -p 8042:8042 \
+    -p 2181:2181 \
+    apachekylin/apache-kylin-standalone:${TAG}
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/scripts/entrypoint.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/entrypoint.sh
new file mode 100644
index 0000000000..89fc7333ce
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/entrypoint.sh
@@ -0,0 +1,139 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+FLAG_INITIALIZED="/home/kylin/initialized"
+TIMEOUT=600
+
+function run_command {
+    local STEP="$1"
+    shift 1
+
+    echo ""
+    echo 
"==============================================================================="
+    echo 
"*******************************************************************************"
+    echo "|"
+    echo "|   $STEP at $(date)"
+    echo "|   Command: $@"
+    echo "|"
+    "$@" 2>&1
+
+    local EC=$?
+    if [ $EC != 0 ]; then
+        echo "ERROR!!"
+        echo "[$STEP] Command FAILED : $@, please check!!!"
+        sleep 7200
+        exit $EC
+    else
+        echo "[$STEP] succeed."
+    fi
+}
+
+function check_and_monitor_status() {
+    local COMPONENT="$1"
+    shift 1
+    echo "Checking $COMPONENT's status..."
+    component_status=
+    ((time_left = TIMEOUT))
+    while ((time_left > 0)); do
+        sleep 10
+        "$@" 2>&1
+        component_status=$?
+        if [[ $component_status -eq 0 ]]; then
+            echo "+"
+            break
+        else
+            echo "-"
+        fi
+        ((time_left -= 10))
+    done
+    if [[ $component_status -eq 0 ]]; then
+        echo "Check $COMPONENT succeed."
+    else
+        echo "ERROR: check $COMPONENT failed."
+    fi
+    return $component_status
+}
+
+function check_hive_port() {
+    local port=$1
+    if [[ $(lsof -i :$port) == *"LISTEN"* ]]; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+# clean pid files
+rm -f /tmp/*.pid
+rm -rf /data/zookeeper/*
+rm -f /data/zookeeper/zookeeper_server.pid
+
+##############################################
+
+# start ssh
+run_command "Start SSH server" /etc/init.d/ssh start
+
+# start mysql
+run_command "Start MySQL" service mysql start
+
+# env init
+if [ ! -f $FLAG_INITIALIZED ]; then
+    run_command "Create Database kylin" mysql -uroot -p123456 -e "CREATE 
DATABASE IF NOT EXISTS kylin default charset utf8mb4 COLLATE 
utf8mb4_general_ci;"
+    run_command "Create Database hive3" mysql -uroot -p123456 -e "CREATE 
DATABASE IF NOT EXISTS hive3 default charset utf8mb4 COLLATE 
utf8mb4_general_ci;"
+    run_command "Init Hive" schematool -initSchema -dbType mysql
+    run_command "Format HDFS" hdfs namenode -format
+fi
+
+# start zookeeper
+run_command "Start Zookeeper" "$ZOOKEEPER_HOME"/bin/zkServer.sh start
+
+# start hadoop
+run_command "Start Hadoop" "$HADOOP_HOME"/sbin/start-all.sh
+
+# start job history server
+run_command "Start History Server" "$HADOOP_HOME"/sbin/start-historyserver.sh
+
+# start hive metastore & hiveserver2
+run_command "Start Hive metastore" "$HIVE_HOME"/bin/start-hivemetastore.sh
+check_and_monitor_status "Check Hive metastore" check_hive_port 9083
+run_command "Start Hive server" "$HIVE_HOME"/bin/start-hiveserver2.sh
+check_and_monitor_status "Check Hive server" check_hive_port 10000
+
+sleep 10
+
+# pre-running initializing
+if [ ! -f $FLAG_INITIALIZED ]
+then
+    mkdir -p "$KYLIN_HOME"/logs
+    hdfs dfs -mkdir -p /kylin
+    run_command "Prepare sample data" "$KYLIN_HOME"/bin/sample.sh
+fi
+
+# start kylin
+run_command "Kylin ENV bypass" touch $KYLIN_HOME/bin/check-env-bypass
+run_command "Start Kylin Instance" "$KYLIN_HOME"/bin/kylin.sh -v start
+
+check_and_monitor_status "Check Env Script" ls $KYLIN_HOME/bin/check-env-bypass
+check_and_monitor_status "Kylin Instance" grep -c "Initialized Spark" 
$KYLIN_HOME/logs/kylin.log
+
+touch $FLAG_INITIALIZED
+echo "Kylin service is already available for you to preview."
+
+# keep docker running
+sleep infinity
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/scripts/install_mysql.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/install-mysql.sh
similarity index 96%
rename from 
dev-support/release-manager/standalone-docker/all_in_one/scripts/install_mysql.sh
rename to 
dev-support/release-manager/standalone-docker/all-in-one/scripts/install-mysql.sh
index 298d21ae50..fcadc02ff1 100644
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/scripts/install_mysql.sh
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/install-mysql.sh
@@ -20,4 +20,4 @@
 export DEBIAN_FRONTEND="noninteractive"
 debconf-set-selections <<< 'mysql-server mysql-server/root_password password 
123456'
 debconf-set-selections <<< 'mysql-server mysql-server/root_password_again 
password 123456'
-apt-get install -y mysql-server
+apt install -y mysql-server
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-historyserver.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-historyserver.sh
new file mode 100644
index 0000000000..de9e401487
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-historyserver.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+bash $HADOOP_HOME/sbin/mr-jobhistory-daemon.sh --config 
$HADOOP_HOME/etc/hadoop start historyserver
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-hivemetastore.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-hivemetastore.sh
new file mode 100644
index 0000000000..b235a03a8b
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-hivemetastore.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+log_dir="$HIVE_HOME/logs"
+
+if [ ! -d "$log_dir" ]; then
+    mkdir -p $log_dir
+fi
+
+nohup $HIVE_HOME/bin/hive --service metastore > "$log_dir/hivemetastore.out" 
2>&1 &
diff --git 
a/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-hiveserver2.sh
 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-hiveserver2.sh
new file mode 100644
index 0000000000..04bd2511d1
--- /dev/null
+++ 
b/dev-support/release-manager/standalone-docker/all-in-one/scripts/start-hiveserver2.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+log_dir="$HIVE_HOME/logs"
+
+if [ ! -d "$log_dir" ]; then
+    mkdir -p $log_dir
+fi
+
+nohup $HIVE_HOME/bin/hive --service hiveserver2 > "$log_dir/hiveserver2.out" 
2>&1 &
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/Dockerfile_hadoop 
b/dev-support/release-manager/standalone-docker/all_in_one/Dockerfile_hadoop
deleted file mode 100644
index 84143f1a0f..0000000000
--- a/dev-support/release-manager/standalone-docker/all_in_one/Dockerfile_hadoop
+++ /dev/null
@@ -1,73 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Docker image with Hadoop/JDK/ZK/HIVE/ installed
-FROM ubuntu:20.04
-
-# update system tools
-RUN apt-get update && apt-get -y install sudo && apt-get -y install wget && 
apt-get -y install vim
-
-ENV HADOOP_VERSION 3.2.1
-ENV ZK_VERSION 3.7.1
-ENV HIVE_VERSION 3.1.2
-ENV DEBIAN_FRONTEND=noninteractive
-ENV HADOOP_HOME /opt/hadoop-$HADOOP_VERSION
-ENV HADOOP_CONF $HADOOP_HOME/etc/hadoop
-ENV ZK_HOME /opt/apache-zookeeper-$ZK_VERSION-bin
-ENV HIVE_HOME /opt/apache-hive-$HIVE_VERSION-bin
-ENV PATH $PATH:$JAVA_HOME/bin:$ZK_HOME/bin:$HADOOP_HOME/bin:$HIVE_HOME/bin:
-
-USER root
-CMD /bin/bash
-# change workdir to install Hadoop|JDK|Zookeeper|HIVE
-WORKDIR /opt
-
-# setup jdk
-RUN apt-get -y install openjdk-8-jre \
-    && wget 
https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java_8.0.30-1ubuntu20.04_all.deb
 \
-    && dpkg -i mysql-connector-java_8.0.30-1ubuntu20.04_all.deb \
-    && cp /usr/share/java/mysql-connector-java-8.0.30.jar 
/usr/share/java/mysql-connector-java.jar
-
-ENV JAVA_HOME /usr/lib/jvm/java-1.8.0-openjdk-amd64
-
-## install mysql
-COPY scripts/install_mysql.sh /opt/
-RUN bash /opt/install_mysql.sh
-
-## setup hadoop
-RUN wget 
https://archive.apache.org/dist/hadoop/core/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz
 \
-    && tar -zxf /opt/hadoop-$HADOOP_VERSION.tar.gz \
-    && rm -f /opt/hadoop-$HADOOP_VERSION.tar.gz \
-    && mkdir -p /data/hadoop
-COPY conf/hadoop/* $HADOOP_CONF/
-
-## setup zk
-RUN wget 
https://archive.apache.org/dist/zookeeper/zookeeper-$ZK_VERSION/apache-zookeeper-$ZK_VERSION-bin.tar.gz
 \
-    && tar -zxf /opt/apache-zookeeper-$ZK_VERSION-bin.tar.gz \
-    && rm -f /opt/apache-zookeeper-$ZK_VERSION-bin.tar.gz \
-    && mkdir -p /data/zookeeper
-COPY conf/zk/zoo.cfg $ZK_HOME/conf/
-
-## setup hive
-RUN wget 
https://archive.apache.org/dist/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz
 \
-    && tar -zxf /opt/apache-hive-$HIVE_VERSION-bin.tar.gz \
-    && rm -f /opt/apache-hive-$HIVE_VERSION-bin.tar.gz \
-    && cp /usr/share/java/mysql-connector-java.jar $HIVE_HOME/lib/ \
-    && rm -f $HIVE_HOME/lib/guava-19.0.jar \
-    && cp $HADOOP_HOME/share/hadoop/common/lib/guava-27.0-jre.jar 
$HIVE_HOME/lib/
-COPY conf/hive/hive-site.xml $HIVE_HOME/conf/
-COPY conf/hive/hive-site.xml $HADOOP_CONF/
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/Dockerfile_kylin 
b/dev-support/release-manager/standalone-docker/all_in_one/Dockerfile_kylin
deleted file mode 100644
index e76aea2715..0000000000
--- a/dev-support/release-manager/standalone-docker/all_in_one/Dockerfile_kylin
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Docker image for apache kylin, based on the Hadoop image
-# FROM hadoop3.2.1-all-in-one-for-kylin5
-FROM apachekylin/apache-kylin-standalone:5.x-base-dev-only
-
-USER root
-
-RUN apt-get -y install curl
-RUN apt-get update && apt-get -y install openjdk-8-jdk
-
-# make a new workdir
-RUN mkdir -p /home/kylin
-
-# change workdir to install Kylin
-WORKDIR /home/kylin
-
-ENV KYLIN_VERSION 5.0.0-beta
-ENV KYLIN_HOME /home/kylin/apache-kylin-$KYLIN_VERSION-bin
-
-COPY package/apache-kylin-$KYLIN_VERSION-*.tar.gz /home/kylin/
-
-RUN tar -zxf /home/kylin/apache-kylin-$KYLIN_VERSION-*.tar.gz \
-    && rm -f /home/kylin/apache-kylin-$KYLIN_VERSION-*.tar.gz \
-    && cp $HIVE_HOME/lib/mysql-connector-java.jar $KYLIN_HOME/lib/ext/ \
-    && cp $HIVE_HOME/lib/mysql-connector-java.jar $KYLIN_HOME/spark/hive_1_2_2/
-
-COPY conf/kylin/kylin.properties.override $KYLIN_HOME/conf/
-
-COPY ./scripts/entrypoint.sh /home/kylin/entrypoint.sh
-RUN chmod u+x /home/kylin/entrypoint.sh
-
-ENTRYPOINT ["/home/kylin/entrypoint.sh"]
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/build_and_run.sh 
b/dev-support/release-manager/standalone-docker/all_in_one/build_and_run.sh
deleted file mode 100755
index fa32c8d22b..0000000000
--- a/dev-support/release-manager/standalone-docker/all_in_one/build_and_run.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-TAG=5-dev
-
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd ${DIR} || exit
-echo "build image in dir "${DIR}
-
-echo "package kylin in local for building image"
-if [[ ! -d ${DIR}/package/ ]]; then
-    mkdir -p ${DIR}/package/
-fi
-
-# The official package didn't carry with Spark binary,
-# So I download it in my laptop, uncompress, execute download-spark-user.sh 
and re-compress
-#
-# wget 
https://archive.apache.org/dist/kylin/apache-kylin-5.0.0-beta/apache-kylin-5.0.0-beta-bin.tar.gz
 -P ${DIR}/package/
-# tar zxf apache-kylin-5.0.0-beta-bin.tar.gz
-# cd apache-kylin-5.0.0-beta-bin
-# bash sbin/download-spark-user.sh
-# tar -czf apache-kylin-5.0.0-beta-bin.tar.gz apache-kylin-5.0.0-beta-bin
-
-echo "start to build Hadoop docker image"
-cp ../../tpch-benchmark/scripts/*.sql scripts/
-# docker build -f Dockerfile_hadoop -t hadoop3.2.1-all-in-one-for-kylin5 .
-docker build -f Dockerfile_kylin -t apachekylin/apache-kylin-standalone:${TAG} 
.
-BUILD_RESULT=$?
-
-if [ "$BUILD_RESULT" != "0" ]; then
-  echo "Image build failed, please check"
-  exit 1
-fi
-echo "Image build succeed"
-
-docker image tag docker.io/apachekylin/apache-kylin-standalone:${TAG} 
apachekylin/apache-kylin-standalone:${TAG}
-
-echo "Start this image locally, and push it to dockerhub later."
-docker stop Kylin5-Machine
-docker rm Kylin5-Machine
-
-docker run -d \
-  --name Kylin5-Machine \
-  --hostname Kylin5-Machine \
-  -m 15G \
-  -p 7070:7070 \
-  -p 8088:8088 \
-  -p 9870:9870 \
-  -p 8032:8032 \
-  -p 8042:8042 \
-  -p 2181:2181 \
-  -p 3306:3306 \
-  -p 9000:9000 \
-  -p 9864:9864 \
-  -p 9866:9866 \
-  -p 9867:9867 \
-  -p 8030:8030 \
-  -p 8031:9867 \
-  -p 8033:9867 \
-    -p 8040:9867 \
-    -p 8040:9867 \
-
-
-  apachekylin/apache-kylin-standalone:${TAG}
-
-docker logs --follow Kylin5-Machine
\ No newline at end of file
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/capacity-scheduler.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/capacity-scheduler.xml
deleted file mode 100644
index 8f016e2c3b..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/capacity-scheduler.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>4</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.5</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    
<value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description>
-      The ResourceCalculator implementation to be used to compare 
-      Resources in the scheduler.
-      The default i.e. DefaultResourceCalculator only uses Memory while
-      DominantResourceCalculator uses dominant-resource to compare 
-      multi-dimensional resources such as Memory, CPU etc.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the 
CapacityScheduler 
-      attempts to schedule rack-local containers. 
-      Typically this should be set to number of nodes in the cluster, By 
default is setting 
-      approximately number of nodes in one rack which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings</name>
-    <value></value>
-    <description>
-      A list of mappings that will be used to assign jobs to queues
-      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
-      Typically this list will be used to map users to queues,
-      for example, u:%user:%user maps all users to queues with the same name
-      as the user.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
-    <value>false</value>
-    <description>
-      If a queue mapping is present, will it override the value specified
-      by the user? This can be used by administrators to place jobs in queues
-      that are different than the one specified by the user.
-      The default is false.
-    </description>
-  </property>
-
-</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/hdfs-site.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/hdfs-site.xml
deleted file mode 100644
index cd4edeedd3..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/hdfs-site.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>dfs.replication</name>
-        <value>1</value>
-    </property>
-    <property>
-        <name>dfs.datanode.address</name>
-        <value>kylin5-machine:9866</value>
-    </property>
-    <property>
-        <name>dfs.namenode.name.dir</name>
-        <value>/data/hadoop/dfs/name</value>
-    </property>
-    <property>
-        <name>dfs.datanode.data.dir</name>
-        <value>/data/hadoop/dfs/data</value>
-    </property>
-</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/mapred-site.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/mapred-site.xml
deleted file mode 100644
index ac8ef33969..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop-dev/mapred-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-    <property>
-        <name>mapreduce.framework.name</name>
-        <value>yarn</value>
-    </property>
-</configuration>
\ No newline at end of file
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/capacity-scheduler.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/capacity-scheduler.xml
deleted file mode 100644
index 8f016e2c3b..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/capacity-scheduler.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>4</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.5</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.resource-calculator</name>
-    
<value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
-    <description>
-      The ResourceCalculator implementation to be used to compare 
-      Resources in the scheduler.
-      The default i.e. DefaultResourceCalculator only uses Memory while
-      DominantResourceCalculator uses dominant-resource to compare 
-      multi-dimensional resources such as Memory, CPU etc.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at the this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      Number of missed scheduling opportunities after which the 
CapacityScheduler 
-      attempts to schedule rack-local containers. 
-      Typically this should be set to number of nodes in the cluster, By 
default is setting 
-      approximately number of nodes in one rack which is 40.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings</name>
-    <value></value>
-    <description>
-      A list of mappings that will be used to assign jobs to queues
-      The syntax for this list is [u|g]:[name]:[queue_name][,next mapping]*
-      Typically this list will be used to map users to queues,
-      for example, u:%user:%user maps all users to queues with the same name
-      as the user.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
-    <value>false</value>
-    <description>
-      If a queue mapping is present, will it override the value specified
-      by the user? This can be used by administrators to place jobs in queues
-      that are different than the one specified by the user.
-      The default is false.
-    </description>
-  </property>
-
-</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/mapred-site.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/mapred-site.xml
deleted file mode 100644
index ac8ef33969..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/mapred-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-    http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-    <property>
-        <name>mapreduce.framework.name</name>
-        <value>yarn</value>
-    </property>
-</configuration>
\ No newline at end of file
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/yarn-site.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/yarn-site.xml
deleted file mode 100644
index 73a38f87ad..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hadoop/yarn-site.xml
+++ /dev/null
@@ -1,56 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<configuration>
-    <property>
-        <name>yarn.resourcemanager.scheduler.class</name>
-        
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
-    </property>
-    <property>
-        <name>yarn.scheduler.minimum-allocation-mb</name>
-        <value>256</value>
-    </property>
-    <property>
-        <name>yarn.scheduler.maximum-allocation-mb</name>
-        <value>2048</value>
-    </property>
-    <property>
-        <name>yarn.nodemanager.vmem-check-enabled</name>
-        <value>false</value>
-    </property>
-    <property>
-        <name>yarn.nodemanager.resource.memory-mb</name>
-        <value>8192</value>
-    </property>
-    <property>
-        <name>yarn.nodemanager.resource.cpu-vcores</name>
-        <value>8</value>
-    </property>
-    <property>
-        <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-        <value>0.6</value>
-    </property>
-    <property>
-        
<name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
-        <value>98.5</value>
-    </property>
-    <property>
-        <name>yarn.nodemanager.aux-services</name>
-        <value>mapreduce_shuffle</value>
-    </property>
-    <property>
-        <name>yarn.resourcemanager.zk-address</name>
-        <value>localhost:2181</value>
-    </property>
-</configuration>
\ No newline at end of file
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hive/hive-site.xml
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/hive/hive-site.xml
deleted file mode 100644
index 0f19649ded..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/hive/hive-site.xml
+++ /dev/null
@@ -1,44 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-    <property>
-        <name>hive.metastore.db.type</name>
-        <value>mysql</value>
-    </property>
-    <property>
-        <name>javax.jdo.option.ConnectionURL</name>
-        
<value>jdbc:mysql://localhost:3306/hive?createDatabaseIfNotExist=true</value>
-    </property>
-    <property>
-        <name>javax.jdo.option.ConnectionDriverName</name>
-        <value>com.mysql.jdbc.Driver</value>
-    </property>
-    <property>
-        <name>javax.jdo.option.ConnectionUserName</name>
-        <value>root</value>
-    </property>
-    <property>
-        <name>javax.jdo.option.ConnectionPassword</name>
-        <value>123456</value>
-    </property>
-    <property>
-        <name>hive.metastore.schema.verification</name>
-        <value>false</value>
-    </property>
-</configuration>
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/kylin-dev/kylin.properties.override
 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/kylin-dev/kylin.properties.override
deleted file mode 100644
index b5acf325d6..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/kylin-dev/kylin.properties.override
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# The http session will timeout in 3 hours
-spring.session.store-type=JDBC
-spring.session.timeout=10800
-
-# Turn off query cache
-kylin.query.cache-enabled=false
-
-server.port=7070
-kylin.query.init-sparder-async=false
-
-kylin.env.apache-hadoop-conf-dir=/opt/hadoop-3.2.1/etc/hadoop
-kylin.env.apache-hive-conf-dir=/opt/apache-hive-3.1.2-bin/conf
-
-# The metastore connection information
-kylin.metadata.url=kylin@jdbc,driverClassName=com.mysql.jdbc.Driver,url=jdbc:mysql://localhost:3306/kylin?useUnicode=true&characterEncoding=utf8,username=root,password=123456,maxTotal=50,maxIdle=8
-
-kylin.env.zookeeper-connect-string=localhost:2181
-kylin.env.hdfs-working-dir=/kylin
-
-# The yarn resource used by query engine(spark session)
-kylin.storage.columnar.spark-conf.spark.driver.memory=512M
-kylin.storage.columnar.spark-conf.spark.driver.memoryOverhead=256M
-kylin.storage.columnar.spark-conf.spark.executor.cores=1
-kylin.storage.columnar.spark-conf.spark.executor.instances=1
-kylin.storage.columnar.spark-conf.spark.executor.memory=2048M
-kylin.storage.columnar.spark-conf.spark.executor.memoryOverhead=256M
-
-# The yarn resource used by build engine(spark session)
-kylin.engine.driver-memory-strategy=2,500
-kylin.engine.spark-conf.spark.driver.memory=512M
-kylin.engine.spark-conf.spark.driver.memoryOverhead=256M
-kylin.engine.spark-conf.spark.executor.cores=1
-kylin.engine.spark-conf.spark.executor.instances=2
-kylin.engine.spark-conf.spark.executor.memory=2048M
-kylin.engine.spark-conf.spark.executor.memoryOverhead=512M
-
-# The yarn resource used by query engine(spark session) Async Query
-kylin.query.async-query.spark-conf.spark.executor.cores=1
-kylin.query.async-query.spark-conf.spark.driver.memory=1024M
-kylin.query.async-query.spark-conf.spark.executor.memory=1024M
-kylin.query.async-query.spark-conf.spark.executor.instances=1
-kylin.query.async-query.spark-conf.spark.executor.memoryOverhead=512M
-
-# Turn off the password force reset function
-kylin.metadata.random-admin-password.enabled=false
-
-kylin.query.engine.push-down.enable-prepare-statement-with-params=true
-kylin.query.calcite.extras-props.FUN=standard,oracle
-kylin.circuit-breaker.threshold.project=500
-kylin.engine.resource-request-over-limit-proportion=3.0
-
-# Kylin can only run two jobs at the same time
-kylin.job.max-concurrent-jobs=2
-
-# If this switch is turned on, Kylin will check node's available memory, if 
remained
-# memory is not fulfilled, job will be hanged.
-# In some cases, the Kylin will refuse to start any build job by this check 
logic.
-kylin.job.auto-set-concurrent-jobs=false
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/mysql/my.cnf 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/mysql/my.cnf
deleted file mode 100644
index a4e30476d6..0000000000
--- a/dev-support/release-manager/standalone-docker/all_in_one/conf/mysql/my.cnf
+++ /dev/null
@@ -1,4 +0,0 @@
-[mysql]
-default-character-set=utf8mb4
-[client]
-default-character-set=utf8mb4
\ No newline at end of file
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/conf/zk/zoo.cfg 
b/dev-support/release-manager/standalone-docker/all_in_one/conf/zk/zoo.cfg
deleted file mode 100644
index 1a576decbb..0000000000
--- a/dev-support/release-manager/standalone-docker/all_in_one/conf/zk/zoo.cfg
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# The number of milliseconds of each tick
-tickTime=2000
-# The number of ticks that the initial 
-# synchronization phase can take
-initLimit=10
-# The number of ticks that can pass between 
-# sending a request and getting an acknowledgement
-syncLimit=5
-# the directory where the snapshot is stored.
-# do not use /tmp for storage, /tmp here is just 
-# example sakes.
-dataDir=/data/zookeeper
-# the port at which the clients will connect
-clientPort=2181
-# the maximum number of client connections.
-# increase this if you need to handle more clients
-#maxClientCnxns=60
-#
-# Be sure to read the maintenance section of the 
-# administrator guide before turning on autopurge.
-#
-# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
-#
-# The number of snapshots to retain in dataDir
-#autopurge.snapRetainCount=3
-# Purge task interval in hours
-# Set to "0" to disable auto purge feature
-#autopurge.purgeInterval=1
diff --git 
a/dev-support/release-manager/standalone-docker/all_in_one/scripts/entrypoint.sh
 
b/dev-support/release-manager/standalone-docker/all_in_one/scripts/entrypoint.sh
deleted file mode 100644
index e503e16cf6..0000000000
--- 
a/dev-support/release-manager/standalone-docker/all_in_one/scripts/entrypoint.sh
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/bash
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-echo "127.0.0.1 sandbox" >> /etc/hosts
-START_FLAG="/home/kylin/first_run"
-TIMEOUT=600
-
-function run_command {
-  local STEP="$1"
-  shift 1
-
-  echo ""
-  echo 
"==============================================================================="
-  echo 
"*******************************************************************************"
-  echo "|"
-  echo "|   $STEP at $(date)"
-  echo "|   Command: $@"
-  echo "|"
-  "$@" 2>&1
-
-  local EC=$?
-  if [ $EC != 0 ]; then
-    echo "ERROR!!"
-    echo "[$STEP] Command FAILED : $@, please check!!!"
-    sleep 7200
-    exit $EC
-  else
-    echo "[$STEP] succeed."
-  fi
-}
-
-function check_and_monitor_status() {
-  local COMPONENT="$1"
-  shift 1
-  echo "Checking $COMPONENT's status..."
-  component_status=
-  ((time_left = TIMEOUT))
-  while ((time_left > 0)); do
-      sleep 10
-      "$@" 2>&1
-      component_status=$?
-      if [[ $component_status -eq 0 ]]; then
-          echo "+"
-          break
-      else
-          echo "-"
-      fi
-      ((timeLeft -= 10))
-  done
-  if [[ $component_status -eq 0 ]]; then
-      echo "Check $COMPONENT succeed."
-  else
-      echo "ERROR: check $COMPONENT failed."
-  fi
-  return $component_status
-}
-
-# clean pid files
-rm -f /tmp/*.pid
-rm -rf /data/zookeeper/*
-rm -f /data/zookeeper/zookeeper_server.pid
-
-##############################################
-
-run_command "Start MySQL" service mysql start
-if [ ! -f "/home/kylin/first_run" ]
-then
-    run_command "Create Database" mysql -uroot -p123456 -e "CREATE DATABASE IF 
NOT EXISTS kylin default charset utf8mb4 COLLATE utf8mb4_general_ci;"
-    run_command "Init Hive" schematool -initSchema -dbType mysql
-    run_command "Format HDFS" hdfs namenode -format
-fi
-
-run_command "Start HDFS [NameNode]" hdfs --daemon start namenode
-run_command "Start HDFS [DataNode]" hdfs --daemon start datanode
-
-# start yarn
-run_command "Start Yarn [ResourceManager]" yarn --daemon start resourcemanager
-run_command "Start Yarn [NodeManager]" yarn --daemon start nodemanager
-
-# start job history server
-# run_command "" mapred --daemon start historyserver
-
-run_command "Start Zookeeper" "$ZK_HOME"/bin/zkServer.sh start
-
-sleep 10s
-
-if [ ! -f $START_FLAG ]
-then
-    check_and_monitor_status "HDFS Usability" hadoop fs -mkdir /tmp
-    mkdir -p "$KYLIN_HOME"/logs
-    hdfs dfs -mkdir -p /kylin
-    run_command "Prepare sample data" "$KYLIN_HOME"/bin/sample.sh
-fi
-
-
-run_command "Start Kylin Instance" "$KYLIN_HOME"/bin/kylin.sh -v start
-
-check_and_monitor_status "Check Env Script" ls $KYLIN_HOME/bin/check-env-bypass
-check_and_monitor_status "Kylin Instance" cat "$KYLIN_HOME"/logs/kylin.log | 
grep -c "Initialized Spark"
-
-touch $START_FLAG
-echo "Kylin service is already available for you to preview."
-while :
-do
-    sleep 10
-done

Reply via email to