satishd commented on a change in pull request #10733:
URL: https://github.com/apache/kafka/pull/10733#discussion_r649741237



##########
File path: 
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java
##########
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.server.log.remote.storage;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.apache.kafka.common.config.ConfigDef.Importance.LOW;
+import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM;
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN;
+import static org.apache.kafka.common.config.ConfigDef.Type.INT;
+import static org.apache.kafka.common.config.ConfigDef.Type.LONG;
+import static org.apache.kafka.common.config.ConfigDef.Type.STRING;
+
+public final class RemoteLogManagerConfig {
+
+    /**
+     * Prefix used for properties to be passed to {@link RemoteStorageManager} 
implementation. Remote log subsystem collects all the properties having
+     * this prefix and passed to {@code RemoteStorageManager} using {@link 
RemoteStorageManager#configure(Map)}.
+     */
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP = 
"remote.log.storage.manager.impl.prefix";
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteStorageManager " +
+            "implementation. For example this value can be `rsm.s3.`.";
+
+    /**
+     * Prefix used for properties to be passed to {@link 
RemoteLogMetadataManager} implementation. Remote log subsystem collects all the 
properties having
+     * this prefix and passed to {@code RemoteLogMetadataManager} using {@link 
RemoteLogMetadataManager#configure(Map)}.
+     */
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP 
= "remote.log.metadata.manager.impl.prefix";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteLogMetadataManager " +
+            "implementation. For example this value can be `rlmm.s3.`.";
+
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = 
"remote.log.storage.system.enable";
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether 
to enable tier storage functionality in a broker or not. Valid values " +
+            "are `true` or `false` and the default value is false. When it is 
true broker starts all the services required for tiered storage functionality.";
+    public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = 
false;
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = 
"remote.log.storage.manager.class.name";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully 
qualified class name of `RemoteLogStorageManager` implementation.";
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = 
"remote.log.storage.manager.class.path";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class 
path of the `RemoteLogStorageManager` implementation." +
+            "If specified, the RemoteLogStorageManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP = 
"remote.log.metadata.manager.class.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = 
"Fully qualified class name of `RemoteLogMetadataManager` implementation.";
+    //todo add the default topic based RLMM class name.
+    public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CLASS_NAME 
= "";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP = 
"remote.log.metadata.manager.class.path";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = 
"Class path of the `RemoteLogMetadataManager` implementation." +
+            "If specified, the RemoteLogMetadataManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP 
= "remote.log.metadata.manager.listener.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_DOC = 
"Listener name of the local broker to which it should get connected if " +
+            "needed by RemoteLogMetadataManager implementation.";
+
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP = 
"remote.log.index.file.cache.total.size.bytes";
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_DOC = "The total size of the space 
allocated to store index files fetched " +
+            "from remote storage in the local storage.";
+    public static final long 
DEFAULT_REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES = 1024 * 1024 * 1024L;
+
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP = 
"remote.log.manager.thread.pool.size";
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Size 
of the thread pool used in scheduling tasks to copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = 
"remote.log.manager.task.interval.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = 
"Interval at which remote log manager runs the scheduled tasks like copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";

Review comment:
       Right, nice catch.

##########
File path: 
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java
##########
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.server.log.remote.storage;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.apache.kafka.common.config.ConfigDef.Importance.LOW;
+import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM;
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN;
+import static org.apache.kafka.common.config.ConfigDef.Type.INT;
+import static org.apache.kafka.common.config.ConfigDef.Type.LONG;
+import static org.apache.kafka.common.config.ConfigDef.Type.STRING;
+
+public final class RemoteLogManagerConfig {
+
+    /**
+     * Prefix used for properties to be passed to {@link RemoteStorageManager} 
implementation. Remote log subsystem collects all the properties having
+     * this prefix and passed to {@code RemoteStorageManager} using {@link 
RemoteStorageManager#configure(Map)}.
+     */
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP = 
"remote.log.storage.manager.impl.prefix";
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteStorageManager " +
+            "implementation. For example this value can be `rsm.s3.`.";
+
+    /**
+     * Prefix used for properties to be passed to {@link 
RemoteLogMetadataManager} implementation. Remote log subsystem collects all the 
properties having
+     * this prefix and passed to {@code RemoteLogMetadataManager} using {@link 
RemoteLogMetadataManager#configure(Map)}.
+     */
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP 
= "remote.log.metadata.manager.impl.prefix";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteLogMetadataManager " +
+            "implementation. For example this value can be `rlmm.s3.`.";
+
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = 
"remote.log.storage.system.enable";
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether 
to enable tier storage functionality in a broker or not. Valid values " +
+            "are `true` or `false` and the default value is false. When it is 
true broker starts all the services required for tiered storage functionality.";
+    public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = 
false;
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = 
"remote.log.storage.manager.class.name";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully 
qualified class name of `RemoteLogStorageManager` implementation.";
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = 
"remote.log.storage.manager.class.path";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class 
path of the `RemoteLogStorageManager` implementation." +
+            "If specified, the RemoteLogStorageManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP = 
"remote.log.metadata.manager.class.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = 
"Fully qualified class name of `RemoteLogMetadataManager` implementation.";
+    //todo add the default topic based RLMM class name.
+    public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CLASS_NAME 
= "";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP = 
"remote.log.metadata.manager.class.path";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = 
"Class path of the `RemoteLogMetadataManager` implementation." +
+            "If specified, the RemoteLogMetadataManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP 
= "remote.log.metadata.manager.listener.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_DOC = 
"Listener name of the local broker to which it should get connected if " +
+            "needed by RemoteLogMetadataManager implementation.";
+
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP = 
"remote.log.index.file.cache.total.size.bytes";
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_DOC = "The total size of the space 
allocated to store index files fetched " +
+            "from remote storage in the local storage.";
+    public static final long 
DEFAULT_REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES = 1024 * 1024 * 1024L;
+
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP = 
"remote.log.manager.thread.pool.size";
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Size 
of the thread pool used in scheduling tasks to copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = 
"remote.log.manager.task.interval.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = 
"Interval at which remote log manager runs the scheduled tasks like copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS = 30 
* 1000L;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP 
= "remote.log.manager.task.retry.backoff.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_DOC = 
"It represents the wait time in milli seconds before the request is retried 
again.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS 
= 30 * 1000L;

Review comment:
       It was not intended to be the same. The default is actually 500 ms.
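
A small sketch of the intent, assuming milliseconds as in the quoted doc strings; the constant names are taken from the quoted diff and the values from this reply:

```java
public class RetryBackoffDefaultsSketch {
    // Per the reply above, the two defaults are not meant to be equal: the initial
    // retry backoff is intended to be 500 ms, while the max backoff stays at 30 seconds.
    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS = 500L;
    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS = 30 * 1000L;
}
```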

##########
File path: 
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java
##########
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.server.log.remote.storage;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.apache.kafka.common.config.ConfigDef.Importance.LOW;
+import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM;
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN;
+import static org.apache.kafka.common.config.ConfigDef.Type.INT;
+import static org.apache.kafka.common.config.ConfigDef.Type.LONG;
+import static org.apache.kafka.common.config.ConfigDef.Type.STRING;
+
+public final class RemoteLogManagerConfig {
+
+    /**
+     * Prefix used for properties to be passed to {@link RemoteStorageManager} 
implementation. Remote log subsystem collects all the properties having
+     * this prefix and passed to {@code RemoteStorageManager} using {@link 
RemoteStorageManager#configure(Map)}.
+     */
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP = 
"remote.log.storage.manager.impl.prefix";
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteStorageManager " +
+            "implementation. For example this value can be `rsm.s3.`.";
+
+    /**
+     * Prefix used for properties to be passed to {@link 
RemoteLogMetadataManager} implementation. Remote log subsystem collects all the 
properties having
+     * this prefix and passed to {@code RemoteLogMetadataManager} using {@link 
RemoteLogMetadataManager#configure(Map)}.
+     */
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP 
= "remote.log.metadata.manager.impl.prefix";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteLogMetadataManager " +
+            "implementation. For example this value can be `rlmm.s3.`.";
+
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = 
"remote.log.storage.system.enable";
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether 
to enable tier storage functionality in a broker or not. Valid values " +
+            "are `true` or `false` and the default value is false. When it is 
true broker starts all the services required for tiered storage functionality.";
+    public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = 
false;
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = 
"remote.log.storage.manager.class.name";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully 
qualified class name of `RemoteLogStorageManager` implementation.";
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = 
"remote.log.storage.manager.class.path";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class 
path of the `RemoteLogStorageManager` implementation." +
+            "If specified, the RemoteLogStorageManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP = 
"remote.log.metadata.manager.class.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = 
"Fully qualified class name of `RemoteLogMetadataManager` implementation.";
+    //todo add the default topic based RLMM class name.
+    public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CLASS_NAME 
= "";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP = 
"remote.log.metadata.manager.class.path";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = 
"Class path of the `RemoteLogMetadataManager` implementation." +
+            "If specified, the RemoteLogMetadataManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP 
= "remote.log.metadata.manager.listener.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_DOC = 
"Listener name of the local broker to which it should get connected if " +
+            "needed by RemoteLogMetadataManager implementation.";
+
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP = 
"remote.log.index.file.cache.total.size.bytes";
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_DOC = "The total size of the space 
allocated to store index files fetched " +
+            "from remote storage in the local storage.";
+    public static final long 
DEFAULT_REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES = 1024 * 1024 * 1024L;
+
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP = 
"remote.log.manager.thread.pool.size";
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Size 
of the thread pool used in scheduling tasks to copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = 
"remote.log.manager.task.interval.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = 
"Interval at which remote log manager runs the scheduled tasks like copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS = 30 
* 1000L;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP 
= "remote.log.manager.task.retry.backoff.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_DOC = 
"It represents the wait time in milli seconds before the request is retried 
again.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS 
= 30 * 1000L;
+
+    public static final String 
REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP = 
"remote.log.manager.task.retry.backoff.max.ms";
+    public static final String 
REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_DOC = "The maximum amount of time 
in milliseconds to wait when the request " +
+            "is retried again. The retry duration will increase exponentially 
for each request failure up to this maximum wait interval.";
+    public static final long 
DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS = 30 * 1000L;

Review comment:
       Jitter should be a double with a default value of 0.2. It is a factor, as used in [ExponentialBackoff](https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/utils/ExponentialBackoff.java#L30).
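
For illustration, a minimal sketch of how these retry settings could be wired into the `ExponentialBackoff` utility linked above; the multiplier of 2 and the class/variable names here are assumptions for the example, not the actual RemoteLogManager wiring:

```java
import org.apache.kafka.common.utils.ExponentialBackoff;

public class RlmTaskRetrySketch {
    public static void main(String[] args) {
        ExponentialBackoff backoff = new ExponentialBackoff(
                500L,     // remote.log.manager.task.retry.backoff.ms (initial wait)
                2,        // assumed exponential multiplier
                30_000L,  // remote.log.manager.task.retry.backoff.max.ms (upper bound)
                0.2d);    // remote.log.manager.task.retry.jitter (a factor, not milliseconds)

        // Each failed attempt waits longer, randomized by the jitter factor and capped at the max.
        System.out.println(backoff.backoff(0)); // ~500 ms
        System.out.println(backoff.backoff(2)); // ~2000 ms
    }
}
```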

##########
File path: 
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java
##########
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.server.log.remote.storage;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.apache.kafka.common.config.ConfigDef.Importance.LOW;
+import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM;
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN;
+import static org.apache.kafka.common.config.ConfigDef.Type.INT;
+import static org.apache.kafka.common.config.ConfigDef.Type.LONG;
+import static org.apache.kafka.common.config.ConfigDef.Type.STRING;
+
+public final class RemoteLogManagerConfig {
+
+    /**
+     * Prefix used for properties to be passed to {@link RemoteStorageManager} 
implementation. Remote log subsystem collects all the properties having
+     * this prefix and passed to {@code RemoteStorageManager} using {@link 
RemoteStorageManager#configure(Map)}.
+     */
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP = 
"remote.log.storage.manager.impl.prefix";
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteStorageManager " +
+            "implementation. For example this value can be `rsm.s3.`.";
+
+    /**
+     * Prefix used for properties to be passed to {@link 
RemoteLogMetadataManager} implementation. Remote log subsystem collects all the 
properties having
+     * this prefix and passed to {@code RemoteLogMetadataManager} using {@link 
RemoteLogMetadataManager#configure(Map)}.
+     */
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP 
= "remote.log.metadata.manager.impl.prefix";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteLogMetadataManager " +
+            "implementation. For example this value can be `rlmm.s3.`.";
+
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = 
"remote.log.storage.system.enable";
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether 
to enable tier storage functionality in a broker or not. Valid values " +
+            "are `true` or `false` and the default value is false. When it is 
true broker starts all the services required for tiered storage functionality.";
+    public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = 
false;
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = 
"remote.log.storage.manager.class.name";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully 
qualified class name of `RemoteLogStorageManager` implementation.";
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = 
"remote.log.storage.manager.class.path";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class 
path of the `RemoteLogStorageManager` implementation." +
+            "If specified, the RemoteLogStorageManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP = 
"remote.log.metadata.manager.class.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = 
"Fully qualified class name of `RemoteLogMetadataManager` implementation.";
+    //todo add the default topic based RLMM class name.
+    public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CLASS_NAME 
= "";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP = 
"remote.log.metadata.manager.class.path";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = 
"Class path of the `RemoteLogMetadataManager` implementation." +
+            "If specified, the RemoteLogMetadataManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP 
= "remote.log.metadata.manager.listener.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_DOC = 
"Listener name of the local broker to which it should get connected if " +
+            "needed by RemoteLogMetadataManager implementation.";
+
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP = 
"remote.log.index.file.cache.total.size.bytes";
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_DOC = "The total size of the space 
allocated to store index files fetched " +
+            "from remote storage in the local storage.";
+    public static final long 
DEFAULT_REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES = 1024 * 1024 * 1024L;
+
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP = 
"remote.log.manager.thread.pool.size";
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Size 
of the thread pool used in scheduling tasks to copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = 
"remote.log.manager.task.interval.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = 
"Interval at which remote log manager runs the scheduled tasks like copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS = 30 
* 1000L;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP 
= "remote.log.manager.task.retry.backoff.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_DOC = 
"It represents the wait time in milli seconds before the request is retried 
again.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS 
= 30 * 1000L;
+
+    public static final String 
REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP = 
"remote.log.manager.task.retry.backoff.max.ms";
+    public static final String 
REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_DOC = "The maximum amount of time 
in milliseconds to wait when the request " +
+            "is retried again. The retry duration will increase exponentially 
for each request failure up to this maximum wait interval.";
+    public static final long 
DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS = 30 * 1000L;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_MS_PROP = 
"remote.log.manager.task.retry.jitter";
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_MS_DOC = 
"The maximum random jitter subtracted from the scheduled " +
+            " wait time to avoid thundering herds of requests.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_MS = 
30 * 1000L;
+
+    public static final String REMOTE_LOG_READER_THREADS_PROP = 
"remote.log.reader.threads";

Review comment:
       Right, it is separate from `remote.log.manager.thread.pool.size`.
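
To illustrate why the two settings are kept separate, a hedged sketch with two independently sized pools; the pool types and names are illustrative assumptions, not the actual RemoteLogManager code:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class RemoteLogThreadPoolsSketch {
    public static void main(String[] args) {
        int rlmTaskThreads = 10;  // remote.log.manager.thread.pool.size: copy/clean-up scheduled tasks
        int readerThreads = 10;   // remote.log.reader.threads: serving remote fetch reads

        // Keeping the pools separate means blocking remote reads do not starve the
        // scheduled copy/retention tasks, and each side can be tuned on its own.
        ScheduledExecutorService rlmTaskPool = Executors.newScheduledThreadPool(rlmTaskThreads);
        ExecutorService remoteReaderPool = Executors.newFixedThreadPool(readerThreads);

        rlmTaskPool.shutdown();
        remoteReaderPool.shutdown();
    }
}
```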

##########
File path: 
storage/src/main/java/org/apache/kafka/server/log/remote/storage/RemoteLogManagerConfig.java
##########
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.server.log.remote.storage;
+
+import org.apache.kafka.common.config.AbstractConfig;
+import org.apache.kafka.common.config.ConfigDef;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.apache.kafka.common.config.ConfigDef.Importance.LOW;
+import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM;
+import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
+import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN;
+import static org.apache.kafka.common.config.ConfigDef.Type.INT;
+import static org.apache.kafka.common.config.ConfigDef.Type.LONG;
+import static org.apache.kafka.common.config.ConfigDef.Type.STRING;
+
+public final class RemoteLogManagerConfig {
+
+    /**
+     * Prefix used for properties to be passed to {@link RemoteStorageManager} 
implementation. Remote log subsystem collects all the properties having
+     * this prefix and passed to {@code RemoteStorageManager} using {@link 
RemoteStorageManager#configure(Map)}.
+     */
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP = 
"remote.log.storage.manager.impl.prefix";
+    public static final String REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteStorageManager " +
+            "implementation. For example this value can be `rsm.s3.`.";
+
+    /**
+     * Prefix used for properties to be passed to {@link 
RemoteLogMetadataManager} implementation. Remote log subsystem collects all the 
properties having
+     * this prefix and passed to {@code RemoteLogMetadataManager} using {@link 
RemoteLogMetadataManager#configure(Map)}.
+     */
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP 
= "remote.log.metadata.manager.impl.prefix";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_DOC = 
"Prefix used for properties to be passed to RemoteLogMetadataManager " +
+            "implementation. For example this value can be `rlmm.s3.`.";
+
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP = 
"remote.log.storage.system.enable";
+    public static final String REMOTE_LOG_STORAGE_SYSTEM_ENABLE_DOC = "Whether 
to enable tier storage functionality in a broker or not. Valid values " +
+            "are `true` or `false` and the default value is false. When it is 
true broker starts all the services required for tiered storage functionality.";
+    public static final boolean DEFAULT_REMOTE_LOG_STORAGE_SYSTEM_ENABLE = 
false;
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP = 
"remote.log.storage.manager.class.name";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_NAME_DOC = "Fully 
qualified class name of `RemoteLogStorageManager` implementation.";
+
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_PROP = 
"remote.log.storage.manager.class.path";
+    public static final String REMOTE_STORAGE_MANAGER_CLASS_PATH_DOC = "Class 
path of the `RemoteLogStorageManager` implementation." +
+            "If specified, the RemoteLogStorageManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP = 
"remote.log.metadata.manager.class.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_DOC = 
"Fully qualified class name of `RemoteLogMetadataManager` implementation.";
+    //todo add the default topic based RLMM class name.
+    public static final String DEFAULT_REMOTE_LOG_METADATA_MANAGER_CLASS_NAME 
= "";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_PROP = 
"remote.log.metadata.manager.class.path";
+    public static final String REMOTE_LOG_METADATA_MANAGER_CLASS_PATH_DOC = 
"Class path of the `RemoteLogMetadataManager` implementation." +
+            "If specified, the RemoteLogMetadataManager implementation and its 
dependent libraries will be loaded by a dedicated" +
+            "classloader which searches this class path before the Kafka 
broker class path. The syntax of this parameter is same" +
+            "with the standard Java class path string.";
+
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP 
= "remote.log.metadata.manager.listener.name";
+    public static final String REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_DOC = 
"Listener name of the local broker to which it should get connected if " +
+            "needed by RemoteLogMetadataManager implementation.";
+
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP = 
"remote.log.index.file.cache.total.size.bytes";
+    public static final String 
REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_DOC = "The total size of the space 
allocated to store index files fetched " +
+            "from remote storage in the local storage.";
+    public static final long 
DEFAULT_REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES = 1024 * 1024 * 1024L;
+
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP = 
"remote.log.manager.thread.pool.size";
+    public static final String REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_DOC = "Size 
of the thread pool used in scheduling tasks to copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final int DEFAULT_REMOTE_LOG_MANAGER_THREAD_POOL_SIZE = 10;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP = 
"remote.log.manager.task.interval.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_DOC = 
"Interval at which remote log manager runs the scheduled tasks like copy " +
+            "segments, fetch remote log indexes and clean up remote log 
segments.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_INTERVAL_MS = 30 
* 1000L;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_PROP 
= "remote.log.manager.task.retry.backoff.ms";
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS_DOC = 
"It represents the wait time in milli seconds before the request is retried 
again.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MS 
= 30 * 1000L;
+
+    public static final String 
REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_PROP = 
"remote.log.manager.task.retry.backoff.max.ms";
+    public static final String 
REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS_DOC = "The maximum amount of time 
in milliseconds to wait when the request " +
+            "is retried again. The retry duration will increase exponentially 
for each request failure up to this maximum wait interval.";
+    public static final long 
DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_BACK_OFF_MAX_MS = 30 * 1000L;
+
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_MS_PROP = 
"remote.log.manager.task.retry.jitter";
+    public static final String REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_MS_DOC = 
"The maximum random jitter subtracted from the scheduled " +
+            " wait time to avoid thundering herds of requests.";
+    public static final long DEFAULT_REMOTE_LOG_MANAGER_TASK_RETRY_JITTER_MS = 
30 * 1000L;
+
+    public static final String REMOTE_LOG_READER_THREADS_PROP = 
"remote.log.reader.threads";
+    public static final String REMOTE_LOG_READER_THREADS_DOC = "Size of the 
thread pool that is allocated for handling remote log reads.";
+    public static final int DEFAULT_REMOTE_LOG_READER_THREADS = 10;
+
+    public static final String REMOTE_LOG_READER_MAX_PENDING_TASKS_PROP = 
"remote.log.reader.max.pending.tasks";
+    public static final String REMOTE_LOG_READER_MAX_PENDING_TASKS_DOC = 
"Maximum remote log reader thread pool task queue size. If the task queue " +
+            "is full, fetch requests are served with an error.";
+    public static final int DEFAULT_REMOTE_LOG_READER_MAX_PENDING_TASKS = 100;
+
+    public static final ConfigDef CONFIG_DEF = new ConfigDef();
+
+    static {
+        
CONFIG_DEF.define(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, 
BOOLEAN,

Review comment:
       When these configs were moved from the other class, they were not optimized. Updated.
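
For reference, a sketch of the chained form this refers to: `ConfigDef.define(...)` returns the `ConfigDef` itself, so the definitions can be written as one fluent chain instead of repeated `CONFIG_DEF.define(...)` statements. The property names come from the quoted diff; the defaults and validators shown are only illustrative:

```java
import org.apache.kafka.common.config.ConfigDef;

import static org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM;
import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
import static org.apache.kafka.common.config.ConfigDef.Type.BOOLEAN;
import static org.apache.kafka.common.config.ConfigDef.Type.INT;

public class RemoteLogManagerConfigDefSketch {
    public static final ConfigDef CONFIG_DEF = new ConfigDef()
            .define("remote.log.storage.system.enable", BOOLEAN, false,
                    MEDIUM, "Whether to enable tiered storage functionality in a broker.")
            .define("remote.log.manager.thread.pool.size", INT, 10,
                    atLeast(1), MEDIUM, "Size of the remote log manager thread pool.");
}
```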

##########
File path: core/src/main/scala/kafka/log/LogConfig.scala
##########
@@ -301,6 +350,16 @@ object LogConfig {
         FollowerReplicationThrottledReplicasDoc, 
FollowerReplicationThrottledReplicasProp)
       .define(MessageDownConversionEnableProp, BOOLEAN, 
Defaults.MessageDownConversionEnable, LOW,
         MessageDownConversionEnableDoc, 
KafkaConfig.LogMessageDownConversionEnableProp)
+
+    // RemoteLogStorageEnableProp, LocalLogRetentionMsProp, 
LocalLogRetentionBytesProp do not have server default
+    // config names.

Review comment:
       In the initial version, we do not want these to be enabled across the cluster/broker. We will enable that in a future version.




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

