This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new 26b1ef428a0 [branch-2.1](doris compose) fix docker start failed (#36534)
26b1ef428a0 is described below

commit 26b1ef428a03bd559d868855ccec4f2eb6d05edb
Author: yujun <yu.jun.re...@gmail.com>
AuthorDate: Thu Jun 20 20:14:17 2024 +0800

    [branch-2.1](doris compose) fix docker start failed (#36534)
---
 docker/runtime/doris-compose/Dockerfile          |  32 +++--
 docker/runtime/doris-compose/Readme.md           |  10 +-
 docker/runtime/doris-compose/cluster.py          |  26 +++-
 docker/runtime/doris-compose/command.py          | 148 +++++++++++++++++++----
 docker/runtime/doris-compose/resource/init_be.sh |   4 +-
 docker/runtime/doris-compose/utils.py            |  28 +++--
 6 files changed, 198 insertions(+), 50 deletions(-)

diff --git a/docker/runtime/doris-compose/Dockerfile b/docker/runtime/doris-compose/Dockerfile
index 2306bf67cd2..73561e6410e 100644
--- a/docker/runtime/doris-compose/Dockerfile
+++ b/docker/runtime/doris-compose/Dockerfile
@@ -16,14 +16,30 @@
 # specific language governing permissions and limitations
 # under the License.
 
+#### START ARG ####
+
+# docker build cmd example:
+# docker build -f docker/runtime/doris-compose/Dockerfile -t <your-image-name>:<version> .
+
 # choose a base image
-FROM openjdk:8u342-jdk
+ARG JDK_IMAGE=openjdk:17-jdk-slim
+#ARG JDK_IMAGE=openjdk:8u342-jdk
+
+#### END ARG ####
+
+FROM ${JDK_IMAGE}
 
-ARG OUT_DIRECTORY=output
+RUN <<EOF
+    if [ -d "/usr/local/openjdk-17" ]; then
+        ln -s /usr/local/openjdk-17  /usr/local/openjdk
+    else \
+        ln -s /usr/local/openjdk-8  /usr/local/openjdk
+    fi
+EOF
 
 # set environment variables
-ENV JAVA_HOME="/usr/local/openjdk-8/"
-ENV jacoco_version 0.8.8
+ENV JAVA_HOME="/usr/local/openjdk"
+ENV JACOCO_VERSION 0.8.8
 
 RUN mkdir -p /opt/apache-doris/coverage
 
@@ -31,17 +47,17 @@ RUN  sed -i s@/deb.debian.org/@/mirrors.aliyun.com/@g 
/etc/apt/sources.list
 RUN  apt-get clean
 
 RUN apt-get update && \
-    apt-get install -y default-mysql-client python lsof tzdata curl unzip 
patchelf jq && \
+    apt-get install -y default-mysql-client python lsof tzdata curl unzip 
patchelf jq procps && \
     ln -fs /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && \
     dpkg-reconfigure -f noninteractive tzdata && \
     apt-get clean
 
-RUN curl -f 
https://repo1.maven.org/maven2/org/jacoco/jacoco/$jacoco_version/jacoco-$jacoco_version.zip
 -o jacoco.zip && \
+RUN curl -f 
https://repo1.maven.org/maven2/org/jacoco/jacoco/${JACOCO_VERSION}/jacoco-${JACOCO_VERSION}.zip
 -o jacoco.zip && \
     mkdir /jacoco && \
     unzip jacoco.zip -d /jacoco
 
 # cloud
-COPY ${OUT_DIRECTORY}/../cloud/CMakeLists.txt 
${OUT_DIRECTORY}/../cloud/output* /opt/apache-doris/cloud/
+#COPY cloud/CMakeLists.txt cloud/output* output/ms* /opt/apache-doris/cloud/
 RUN <<EOF
     mkdir /opt/apache-doris/fdb
     if [ -d /opt/apache-doris/cloud/bin ]; then
@@ -50,7 +66,7 @@ RUN <<EOF
 EOF
 
 # fe and be
-COPY $OUT_DIRECTORY /opt/apache-doris/
+COPY output /opt/apache-doris/
 # in docker, run 'chmod 755 doris_be' first time cost 1min, remove it.
 RUN sed -i 's/\<chmod\>/echo/g' /opt/apache-doris/be/bin/start_be.sh
 
diff --git a/docker/runtime/doris-compose/Readme.md b/docker/runtime/doris-compose/Readme.md
index cd3d7805fe8..a83fa81e761 100644
--- a/docker/runtime/doris-compose/Readme.md
+++ b/docker/runtime/doris-compose/Readme.md
@@ -118,8 +118,16 @@ python docker/runtime/doris-compose/doris-compose.py  
<command> -h
 ### Generate regression custom conf file
 
 ```
-python docker/runtime/doris-compose/doris-compose.py config <cluster-name>
+python docker/runtime/doris-compose/doris-compose.py config <cluster-name>  
<doris-root-path>  [-q]  [--connect-follow-fe]
 ```
 
 Generate regression-conf-custom.groovy to connect to the specific docker 
cluster.
 
+### Setup cloud multi clusters test env
+
+steps:
+
+1. Create a new cluster:  `python doris-compose.py up my-cluster  my-image  
--add-fe-num 2  --add-be-num 4 --cloud`
+2. Generate regression-conf-custom.groovy: `python doris-compose.py config 
my-cluster  <doris-root-path> --connect-follow-fe`
+3. Run regression test: `bash run-regression-test.sh --run -times 1 -parallel 
1 -suiteParallel 1 -d cloud/multi_cluster`
+
diff --git a/docker/runtime/doris-compose/cluster.py b/docker/runtime/doris-compose/cluster.py
index cda6c3d845d..5381c094cf2 100644
--- a/docker/runtime/doris-compose/cluster.py
+++ b/docker/runtime/doris-compose/cluster.py
@@ -424,7 +424,7 @@ class BE(Node):
                 "cloud_unique_id = " + self.cloud_unique_id(),
                 "meta_service_endpoint = {}".format(
                     self.cluster.get_meta_server_addr()),
-                'tmp_file_dirs = [ 
{"path":"./storage/tmp","max_cache_bytes":10240000," 
"max_upload_bytes":10240000}]',
+                'tmp_file_dirs = [ 
{"path":"./storage/tmp","max_cache_bytes":10240000, 
"max_upload_bytes":10240000}]',
                 'enable_file_cache = true',
                 'file_cache_path = [ {{"path": "{}/storage/file_cache", 
"total_size":53687091200, "query_limit": 10737418240}}]'
                 .format(self.docker_home_dir()),
@@ -435,6 +435,10 @@ class BE(Node):
         with open("{}/conf/CLUSTER_NAME".format(self.get_path()), "w") as f:
             f.write(self.cluster.be_cluster)
 
+    def get_cluster_name(self):
+        with open("{}/conf/CLUSTER_NAME".format(self.get_path()), "r") as f:
+            return f.read().strip()
+
     def init_disk(self, be_disks):
         path = self.get_path()
         dirs = []
@@ -480,6 +484,7 @@ class BE(Node):
         envs = super().docker_env()
         if self.cluster.is_cloud:
             envs["CLOUD_UNIQUE_ID"] = self.cloud_unique_id()
+            envs["REG_BE_TO_MS"] = 1 if self.cluster.reg_be else 0
         return envs
 
     def cloud_unique_id(self):
@@ -510,7 +515,10 @@ class CLOUD(Node):
         return [MS_PORT]
 
     def conf_file_name(self):
-        return "doris_cloud.conf"
+        for file in os.listdir(os.path.join(self.get_path(), "conf")):
+            if file == "doris_cloud.conf" or file == "selectdb_cloud.conf":
+                return file
+        return "Not found conf file for ms or recycler"
 
 
 class MS(CLOUD):
@@ -590,8 +598,8 @@ class FDB(Node):
 class Cluster(object):
 
     def __init__(self, name, subnet, image, is_cloud, fe_config, be_config,
-                 ms_config, recycle_config, be_disks, be_cluster, coverage_dir,
-                 cloud_store_config):
+                 ms_config, recycle_config, be_disks, be_cluster, reg_be,
+                 coverage_dir, cloud_store_config):
         self.name = name
         self.subnet = subnet
         self.image = image
@@ -602,6 +610,7 @@ class Cluster(object):
         self.recycle_config = recycle_config
         self.be_disks = be_disks
         self.be_cluster = be_cluster
+        self.reg_be = reg_be
         self.coverage_dir = coverage_dir
         self.cloud_store_config = cloud_store_config
         self.groups = {
@@ -611,14 +620,15 @@ class Cluster(object):
 
     @staticmethod
     def new(name, image, is_cloud, fe_config, be_config, ms_config,
-            recycle_config, be_disks, be_cluster, coverage_dir,
+            recycle_config, be_disks, be_cluster, reg_be, coverage_dir,
             cloud_store_config):
         os.makedirs(LOCAL_DORIS_PATH, exist_ok=True)
         with filelock.FileLock(os.path.join(LOCAL_DORIS_PATH, "lock")):
             subnet = gen_subnet_prefix16()
             cluster = Cluster(name, subnet, image, is_cloud, fe_config,
                               be_config, ms_config, recycle_config, be_disks,
-                              be_cluster, coverage_dir, cloud_store_config)
+                              be_cluster, reg_be, coverage_dir,
+                              cloud_store_config)
             os.makedirs(cluster.get_path(), exist_ok=True)
             os.makedirs(get_status_path(name), exist_ok=True)
             cluster._save_meta()
@@ -707,6 +717,10 @@ class Cluster(object):
     def get_meta_server_addr(self):
         return "{}:{}".format(self.get_node(Node.TYPE_MS, 1).get_ip(), MS_PORT)
 
+    def get_recycle_addr(self):
+        return "{}:{}".format(
+            self.get_node(Node.TYPE_RECYCLE, 1).get_ip(), MS_PORT)
+
     def remove(self, node_type, id):
         group = self.get_group(node_type)
         group.remove(id)
diff --git a/docker/runtime/doris-compose/command.py b/docker/runtime/doris-compose/command.py
index 7e9cc3df2cb..87ae862236a 100644
--- a/docker/runtime/doris-compose/command.py
+++ b/docker/runtime/doris-compose/command.py
@@ -288,7 +288,7 @@ class UpCommand(Command):
 
         parser.add_argument("--coverage-dir",
                             default="",
-                            help="code coverage output directory")
+                            help="Set code coverage output directory")
 
         parser.add_argument(
             "--fdb-version",
@@ -296,6 +296,37 @@ class UpCommand(Command):
             default="7.1.26",
             help="fdb image version. Only use in cloud cluster.")
 
+        if self._support_boolean_action():
+            parser.add_argument(
+                "--detach",
+                default=True,
+                action=self._get_parser_bool_action(False),
+                help="Detached mode: Run containers in the background. If 
specific --no-detach, "\
+                "will run containers in frontend. ")
+        else:
+            parser.add_argument("--no-detach",
+                                dest='detach',
+                                default=True,
+                                action=self._get_parser_bool_action(False),
+                                help="Run containers in frontend. ")
+
+        if self._support_boolean_action():
+            parser.add_argument(
+                "--reg-be",
+                default=True,
+                action=self._get_parser_bool_action(False),
+                help="Register be to meta server in cloud mode, use for multi 
clusters test. If specific --no-reg-be, "\
+                "will not register be to meta server. ")
+        else:
+            parser.add_argument(
+                "--no-reg-be",
+                dest='reg_be',
+                default=True,
+                action=self._get_parser_bool_action(False),
+                help=
+                "Don't register be to meta server in cloud mode, use for multi 
clusters test"
+            )
+
     def run(self, args):
         if not args.NAME:
             raise Exception("Need specific not empty cluster name")
@@ -356,7 +387,7 @@ class UpCommand(Command):
                                           args.fe_config, args.be_config,
                                           args.ms_config, args.recycle_config,
                                           args.be_disks, args.be_cluster,
-                                          args.coverage_dir,
+                                          args.reg_be, args.coverage_dir,
                                           cloud_store_config)
             LOG.info("Create new cluster {} succ, cluster path is {}".format(
                 args.NAME, cluster.get_path()))
@@ -412,7 +443,9 @@ class UpCommand(Command):
         if not args.start:
             options.append("--no-start")
         else:
-            options = ["-d", "--remove-orphans"]
+            options += ["--remove-orphans"]
+            if args.detach:
+                options.append("-d")
             if args.force_recreate:
                 options.append("--force-recreate")
 
@@ -421,8 +454,12 @@ class UpCommand(Command):
             related_node_num = cluster.get_all_nodes_num()
             related_nodes = None
 
-        utils.exec_docker_compose_command(cluster.get_compose_file(), "up",
-                                          options, related_nodes)
+        output_real_time = args.start and not args.detach
+        utils.exec_docker_compose_command(cluster.get_compose_file(),
+                                          "up",
+                                          options,
+                                          related_nodes,
+                                          output_real_time=output_real_time)
 
         ls_cmd = "python docker/runtime/doris-compose/doris-compose.py ls " + 
cluster.name
         LOG.info("Inspect command: " + utils.render_green(ls_cmd) + "\n")
@@ -708,36 +745,93 @@ class GenConfCommand(Command):
             "config",
             help="Generate regression-conf-custom.groovy for regression test.")
         parser.add_argument("NAME", default="", help="Specific cluster name.")
+        parser.add_argument("DORIS_ROOT_PATH", default="", help="Specify doris 
or selectdb root path, "\
+                "i.e. the parent directory of regression-test.")
+        parser.add_argument("--connect-follow-fe",
+                            default=False,
+                            action=self._get_parser_bool_action(True),
+                            help="Connect to follow fe.")
+        parser.add_argument("-q",
+                            "--quiet",
+                            default=False,
+                            action=self._get_parser_bool_action(True),
+                            help="write config quiet, no need confirm.")
 
         return parser
 
     def run(self, args):
-        content = '''
-jdbcUrl = 
"jdbc:mysql://127.0.0.1:9030/?useLocalSessionState=true&allowLoadLocalInfile=true"
-targetJdbcUrl = 
"jdbc:mysql://127.0.0.1:9030/?useLocalSessionState=true&allowLoadLocalInfile=true"
-feSourceThriftAddress = "127.0.0.1:9020"
-feTargetThriftAddress = "127.0.0.1:9020"
-syncerAddress = "127.0.0.1:9190"
-feHttpAddress = "127.0.0.1:8030"
+        base_conf = '''
+jdbcUrl = 
"jdbc:mysql://{fe_ip}:9030/?useLocalSessionState=true&allowLoadLocalInfile=true"
+targetJdbcUrl = 
"jdbc:mysql://{fe_ip}:9030/?useLocalSessionState=true&allowLoadLocalInfile=true"
+feSourceThriftAddress = "{fe_ip}:9020"
+feTargetThriftAddress = "{fe_ip}:9020"
+syncerAddress = "{fe_ip}:9190"
+feHttpAddress = "{fe_ip}:8030"
+'''
+
+        cloud_conf = '''
+feCloudHttpAddress = "{fe_ip}:18030"
+metaServiceHttpAddress = "{ms_endpoint}"
+metaServiceToken = "greedisgood9999"
+recycleServiceHttpAddress = "{recycle_endpoint}"
+instanceId = "default_instance_id"
+multiClusterInstance = "default_instance_id"
+multiClusterBes = "{multi_cluster_bes}"
+cloudUniqueId= "{fe_cloud_unique_id}"
 '''
-        master_fe_ip = CLUSTER.get_master_fe_endpoint(args.NAME)
-        if not master_fe_ip:
+        cluster = CLUSTER.Cluster.load(args.NAME)
+        master_fe_ip_ep = CLUSTER.get_master_fe_endpoint(args.NAME)
+        if not master_fe_ip_ep:
             print("Not found cluster with name {} in directory {}".format(
                 args.NAME, CLUSTER.LOCAL_DORIS_PATH))
             return
-        doris_root_dir = os.path.abspath(__file__)
-        for i in range(4):
-            doris_root_dir = os.path.dirname(doris_root_dir)
-        regression_conf_custom = doris_root_dir + 
"/regression-test/conf/regression-conf-custom.groovy"
-        if input("write file {} ?\n   y/N:  ".format(
-                regression_conf_custom)) != 'y':
-            print("No write regression custom file.")
-            return
+
+        master_fe_ip = master_fe_ip_ep[:master_fe_ip_ep.find(':')]
+        fe_ip = ""
+        if not args.connect_follow_fe:
+            fe_ip = master_fe_ip
+        else:
+            for fe in cluster.get_all_nodes(CLUSTER.Node.TYPE_FE):
+                if fe.get_ip() == master_fe_ip:
+                    continue
+                else:
+                    fe_ip = fe.get_ip()
+                    break
+            if not fe_ip:
+                raise Exception(
+                    "Not found follow fe, pls add a follow fe use command `up 
<your-cluster> --add-fe-num 1`"
+                )
+
+        relative_custom_file_path = 
"regression-test/conf/regression-conf-custom.groovy"
+        regression_conf_custom = os.path.join(args.DORIS_ROOT_PATH,
+                                              relative_custom_file_path)
+        if not args.quiet:
+            ans = input(
+                "\nwrite file {} ?  y/n: ".format(regression_conf_custom))
+            if ans != 'y':
+                print("\nNo write regression custom file.")
+                return
+
         with open(regression_conf_custom, "w") as f:
-            f.write(
-                content.replace("127.0.0.1",
-                                master_fe_ip[:master_fe_ip.find(':')]))
-        print("Write succ: " + regression_conf_custom)
+            f.write(base_conf.format(fe_ip=fe_ip))
+            if cluster.is_cloud:
+                multi_cluster_bes = ",".join([
+                    "{}:{}:{}:{}:{}".format(be.get_ip(),
+                                            CLUSTER.BE_HEARTBEAT_PORT,
+                                            CLUSTER.BE_WEBSVR_PORT,
+                                            be.cloud_unique_id(),
+                                            CLUSTER.BE_BRPC_PORT)
+                    for be in cluster.get_all_nodes(CLUSTER.Node.TYPE_BE)
+                ])
+                f.write(
+                    cloud_conf.format(
+                        fe_ip=fe_ip,
+                        ms_endpoint=cluster.get_meta_server_addr(),
+                        recycle_endpoint=cluster.get_recycle_addr(),
+                        multi_cluster_bes=multi_cluster_bes,
+                        fe_cloud_unique_id=cluster.get_node(
+                            CLUSTER.Node.TYPE_FE, 1).cloud_unique_id()))
+        print("\nWrite succ: " + regression_conf_custom)
 
 
 class ListCommand(Command):
@@ -907,6 +1001,8 @@ class ListCommand(Command):
                         container.attrs["NetworkSettings"]
                         ["Networks"].values())[0]["IPAMConfig"]["IPv4Address"]
                     node.image = ",".join(container.image.tags)
+                    if not node.image:
+                        node.image = container.attrs["Config"]["Image"]
                     node.container_id = container.short_id
                     node.status = container.status
                     if node.container_id and \
diff --git a/docker/runtime/doris-compose/resource/init_be.sh b/docker/runtime/doris-compose/resource/init_be.sh
index 0df464c625c..d9b7953b534 100755
--- a/docker/runtime/doris-compose/resource/init_be.sh
+++ b/docker/runtime/doris-compose/resource/init_be.sh
@@ -146,7 +146,9 @@ add_be_to_cluster() {
     fi
 
     if [ "${IS_CLOUD}" == "1" ]; then
-        add_cloud_be
+        if [ "${REG_BE_TO_MS}" == "1" ]; then
+            add_cloud_be
+        fi
     else
         add_local_be
     fi
diff --git a/docker/runtime/doris-compose/utils.py b/docker/runtime/doris-compose/utils.py
index 8b4b39619bc..54255b597bc 100644
--- a/docker/runtime/doris-compose/utils.py
+++ b/docker/runtime/doris-compose/utils.py
@@ -179,25 +179,37 @@ def is_dir_empty(dir):
     return False if os.listdir(dir) else True
 
 
-def exec_shell_command(command, ignore_errors=False):
+def exec_shell_command(command, ignore_errors=False, output_real_time=False):
     LOG.info("Exec command: {}".format(command))
     p = subprocess.Popen(command,
                          shell=True,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.STDOUT)
-    out = p.communicate()[0].decode('utf-8')
+    out = ''
+    exitcode = None
+    if output_real_time:
+        while p.poll() is None:
+            s = p.stdout.readline().decode('utf-8')
+            if ENABLE_LOG and s.rstrip():
+                print(s.rstrip())
+            out += s
+        exitcode = p.wait()
+    else:
+        out = p.communicate()[0].decode('utf-8')
+        exitcode = p.returncode
+        if ENABLE_LOG and out:
+            print(out)
     if not ignore_errors:
-        assert p.returncode == 0, out
-    if ENABLE_LOG and out:
-        print(out)
-    return p.returncode, out
+        assert exitcode == 0, out
+    return exitcode, out
 
 
 def exec_docker_compose_command(compose_file,
                                 command,
                                 options=None,
                                 nodes=None,
-                                user_command=None):
+                                user_command=None,
+                                output_real_time=False):
     if nodes != None and not nodes:
         return 0, "Skip"
 
@@ -206,7 +218,7 @@ def exec_docker_compose_command(compose_file,
         " ".join([node.service_name() for node in nodes]) if nodes else "",
         user_command if user_command else "")
 
-    return exec_shell_command(compose_cmd)
+    return exec_shell_command(compose_cmd, output_real_time=output_real_time)
 
 
 def get_docker_subnets_prefix16():


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org

Reply via email to