godfreyhe commented on a change in pull request #9203: [FLINK-13375][table-api] Improve config names in ExecutionConfigOptions and OptimizerConfigOptions
URL: https://github.com/apache/flink/pull/9203#discussion_r307103832
########## File path: flink-table/flink-table-api-java/src/main/java/org/apache/flink/table/api/config/ExecutionConfigOptions.java ##########
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.flink.table.api.config;
+
+import org.apache.flink.configuration.ConfigOption;
+
+import static org.apache.flink.configuration.ConfigOptions.key;
+
+/**
+ * This class holds configuration constants used by Flink's table module.
+ */
+public class ExecutionConfigOptions {
+
+	// ------------------------------------------------------------------------
+	// Source Options
+	// ------------------------------------------------------------------------
+	public static final ConfigOption<String> TABLE_EXEC_SOURCE_IDLE_TIMEOUT =
+			key("table.exec.source.idle-timeout")
+					.defaultValue("-1 ms")
+					.withDescription("When a source do not receive any elements for the timeout time, " +
+							"it will be marked as temporarily idle. This allows downstream " +
+							"tasks to advance their watermarks without the need to wait for " +
+							"watermarks from this source while it is idle.");
+
+	// ------------------------------------------------------------------------
+	// Sort Options
+	// ------------------------------------------------------------------------
+	public static final ConfigOption<Integer> TABLE_EXEC_SORT_DEFAULT_LIMIT =
+			key("table.exec.sort.default-limit")
+					.defaultValue(200)
+					.withDescription("Default limit when user don't set a limit after order by.");
+
+	public static final ConfigOption<Integer> TABLE_EXEC_SORT_MAX_NUM_FILE_HANDLES =
+			key("table.exec.sort.max-num-file-handles")
+					.defaultValue(128)
+					.withDescription("The maximal fan-in for external merge sort. It limits the number of file handles per operator. " +
+							"If it is too small, may cause intermediate merging. But if it is too large, " +
+							"it will cause too many files opened at the same time, consume memory and lead to random reading.");
+
+	public static final ConfigOption<Boolean> TABLE_EXEC_SORT_ASYNC_MERGE_ENABLED =
+			key("table.exec.sort.async-merge-enabled")
+					.defaultValue(true)
+					.withDescription("Whether to asynchronously merge sorted spill files.");
+
+	// ------------------------------------------------------------------------
+	// Spill Options
+	// ------------------------------------------------------------------------
+	public static final ConfigOption<Boolean> TABLE_EXEC_SPILL_COMPRESSION_ENABLED =
+			key("table.exec.spill-compression.enabled")
+					.defaultValue(true)
+					.withDescription("Whether to compress spilled data. " +
+							"(Now include sort and hash agg and hash join)");
+
+	public static final ConfigOption<String> TABLE_EXEC_SPILL_COMPRESSION_CODEC =
+			key("table.exec.spill-compression.codec")
+					.defaultValue("lz4")
+					.withDescription("Use that compression codec to compress spilled file. " +
+							"Now we only support lz4.");
+
+	public static final ConfigOption<Integer> TABLE_EXEC_SPILL_COMPRESSION_BLOCK_SIZE =
+			key("table.exec.spill-compression.block-size")
+					.defaultValue(64 * 1024)
+					.withDescription("The buffer is to compress. The larger the buffer," + " the better the compression ratio, but the more memory consumption.");
+
+	// ------------------------------------------------------------------------
+	// Resource Options
+	// ------------------------------------------------------------------------
+
+	public static final ConfigOption<Integer> TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM =
+			key("table.exec.resource.default-parallelism")
+					.defaultValue(-1)
+					.withDescription("Default parallelism of job operators. If it is <= 0, use parallelism of StreamExecutionEnvironment(" + "its default value is the num of cpu cores in the client host).");
+
+	public static final ConfigOption<Integer> TABLE_EXEC_RESOURCE_SOURCE_PARALLELISM =
+			key("table.exec.resource.source.parallelism")
+					.defaultValue(-1)
+					.withDescription("Sets source parallelism, if it is <= 0, use " + TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.key() + " to set source parallelism.");
+
+	public static final ConfigOption<Integer> TABLE_EXEC_RESOURCE_SINK_PARALLELISM =
+			key("table.exec.resource.sink.parallelism")
+					.defaultValue(-1)
+					.withDescription("Sets sink parallelism, if it is <= 0, use " + TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM.key() + " to set sink parallelism.");
+
+	public static final ConfigOption<String> TABLE_EXEC_RESOURCE_EXTERNAL_BUFFER_MEMORY =
+			key("table.exec.resource.external-buffer-memory")
+					.defaultValue("10 mb")
+					.withDescription("Sets the external buffer memory size that is used in sort merge join and nested join and over window.");
+
+	public static final ConfigOption<String> TABLE_EXEC_RESOURCE_HASH_AGG_MEMORY =
+			key("table.exec.resource.hash-agg.memory")
+					.defaultValue("128 mb")
+					.withDescription("Sets the managed memory size of hash aggregate operator.");
+
+	public static final ConfigOption<String> TABLE_EXEC_RESOURCE_HASH_JOIN_MEMORY =
+			key("table.exec.resource.hash-join.memory")
+					.defaultValue("128 mb")
+					.withDescription("Sets the managed memory for hash join operator. It defines the lower limit.");
+
+	public static final ConfigOption<String> TABLE_EXEC_RESOURCE_SORT_MEMORY =
+			key("table.exec.resource.sort.memory")
+					.defaultValue("128 mb")
+					.withDescription("Sets the managed buffer memory size for sort.");

Review comment:
   Sets the managed buffer memory size for sort operator. ?

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

With regards,
Apache Git Services