lindong28 commented on a change in pull request #28: URL: https://github.com/apache/flink-ml/pull/28#discussion_r758878889
########## File path: flink-ml-lib/src/test/java/org/apache/flink/ml/classification/linear/LogisticRegressionTest.java ##########

+/** Tests {@link LogisticRegression} and {@link LogisticRegressionModel}. */
+public class LogisticRegressionTest {
+
+    private StreamExecutionEnvironment env;
+
+    private StreamTableEnvironment tEnv;
+
+    private static List<Row> trainData =
+            Arrays.asList(
+                    Row.of(new double[] {1, 2, 3, 4}, -1., 1.),

Review comment: Since `LogisticRegression` supports `weights`, would it be better to use different weights in the test data to provide more test coverage?
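For illustration, test rows with non-uniform weights might look like the sketch below. The weight values are invented, and the row layout `(features, label, weight)` is assumed from the existing test data:

```
private static List<Row> trainData =
        Arrays.asList(
                // (features, label, weight): weights deliberately differ per row,
                // so a bug that ignores or mishandles weights would change the fit.
                Row.of(new double[] {1, 2, 3, 4}, -1., 1.),
                Row.of(new double[] {2, 2, 3, 4}, -1., 2.),
                Row.of(new double[] {3, 2, 3, 4}, 1., 3.),
                Row.of(new double[] {4, 2, 3, 4}, 1., 0.5));
```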
########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/classification/linear/LogisticRegression.java ##########

+        DataStream<Tuple3<Double, Double, double[]>> trainData =
+                tEnv.toDataStream(inputs[0])
+                        .map(
+                                dataPoint ->
+                                        Tuple3.of(
+                                                getWeightCol() == null
+                                                        ? new Double(1.0)
+                                                        : (Double)
+                                                                dataPoint.getField(getWeightCol()),
+                                                (Double) dataPoint.getField(getLabelCol()),
+                                                (double[]) dataPoint.getField(getFeaturesCol())))
+                        .returns(
+                                new TupleTypeInfo<>(

Review comment: Many algorithms have `features`, `weight` and `label`, and it is common for those algorithms to first convert the input `Row` into a `Tuple3` of these fields. Hopefully we can establish a best practice for the order of these fields in the tuple, so that the algorithm logic is consistent and intuitive to read. My understanding is that the input (i.e. `features`, `weight`) usually comes before the expected output (i.e. `label`), and it is more intuitive for `features` to come before `weight` because `features` are more important. So the tuple could be `<features, weight, label>`. What do you think?
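A minimal sketch of the proposed `<features, weight, label>` ordering (the reordered code is illustrative, not taken from the PR):

```
// f0 = features, f1 = weight, f2 = label, per the suggested convention.
DataStream<Tuple3<double[], Double, Double>> trainData =
        tEnv.toDataStream(inputs[0])
                .map(
                        dataPoint ->
                                Tuple3.of(
                                        (double[]) dataPoint.getField(getFeaturesCol()),
                                        getWeightCol() == null
                                                ? 1.0
                                                : (Double) dataPoint.getField(getWeightCol()),
                                        (Double) dataPoint.getField(getLabelCol())))
                .returns(
                        new TupleTypeInfo<>(
                                PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO,
                                BasicTypeInfo.DOUBLE_TYPE_INFO,
                                BasicTypeInfo.DOUBLE_TYPE_INFO));
```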
########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/classification/linear/LogisticRegression.java ##########

+        DataStream<Tuple3<Double, Double, double[]>> trainData =

Review comment: The statement for calculating `Tuple3.of(...)` is a bit too long, and the code has an extra `returns` statement which hopefully could be removed. Would the following code be a bit simpler?

```
DataStream<Tuple3<Double, Double, double[]>> trainData =
        tEnv.toDataStream(inputs[0])
                .map(
                        new MapFunction<Row, Tuple3<Double, Double, double[]>>() {
                            @Override
                            public Tuple3<Double, Double, double[]> map(Row dataPoint) {
                                Double weight =
                                        getWeightCol() == null
                                                ? new Double(1.0)
                                                : (Double) dataPoint.getField(getWeightCol());
                                return Tuple3.of(
                                        weight,
                                        (Double) dataPoint.getField(getLabelCol()),
                                        (double[]) dataPoint.getField(getFeaturesCol()));
                            }
                        });
```
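As background on why the hint becomes removable: Java erases a lambda's generic type arguments, so Flink's `TypeExtractor` cannot recover `Tuple3<Double, Double, double[]>` from the lambda version and the explicit `returns(...)` is required, while an anonymous subclass keeps its type arguments in class metadata. A minimal contrast, assuming some `DataStream<Row> rows` (placeholder name):

```
// Lambda version: the tuple's type parameters are erased at compile time,
// so an explicit type hint is required.
DataStream<Tuple3<Double, Double, double[]>> viaLambda =
        rows.map(row -> Tuple3.of(1.0, 2.0, new double[] {3.0}))
                .returns(
                        new TupleTypeInfo<>(
                                BasicTypeInfo.DOUBLE_TYPE_INFO,
                                BasicTypeInfo.DOUBLE_TYPE_INFO,
                                PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO));

// Anonymous-class version: the concrete type arguments survive in the class
// metadata, so Flink can extract them and no returns(...) call is needed.
DataStream<Tuple3<Double, Double, double[]>> viaAnonymousClass =
        rows.map(
                new MapFunction<Row, Tuple3<Double, Double, double[]>>() {
                    @Override
                    public Tuple3<Double, Double, double[]> map(Row row) {
                        return Tuple3.of(1.0, 2.0, new double[] {3.0});
                    }
                });
```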
########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/classification/linear/LogisticRegression.java ##########

+                        .returns(
+                                new TupleTypeInfo<>(

Review comment: Can we also add comments to explain the semantic meaning of the fields of `trainData`?
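For example, the field semantics could be documented right where `trainData` is built. A sketch, with the comment wording and the `returns(...)` arguments filled in by assumption since the quoted hunk cuts off after `new TupleTypeInfo<>(`:

```
// Each element of trainData describes one weighted training sample:
//   f0: the sample weight, defaulting to 1.0 when no weight column is set;
//   f1: the label value from the label column;
//   f2: the feature vector from the features column.
DataStream<Tuple3<Double, Double, double[]>> trainData =
        tEnv.toDataStream(inputs[0])
                .map(
                        dataPoint ->
                                Tuple3.of(
                                        getWeightCol() == null
                                                ? new Double(1.0)
                                                : (Double) dataPoint.getField(getWeightCol()),
                                        (Double) dataPoint.getField(getLabelCol()),
                                        (double[]) dataPoint.getField(getFeaturesCol())))
                .returns(
                        new TupleTypeInfo<>(
                                BasicTypeInfo.DOUBLE_TYPE_INFO,
                                BasicTypeInfo.DOUBLE_TYPE_INFO,
                                PrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO));
```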
########## File path: flink-ml-core/src/main/java/org/apache/flink/ml/common/datastream/DataStreamUtils.java ##########

+        @Override
+        public void processElement(StreamRecord<T> streamRecord) {
+            distinctLabels.add(streamRecord.getValue());

Review comment: I am not sure the performance of doing this is strictly better than using `distinctLabelsState.add(streamRecord.getValue())` directly. The reason is that `ListState` could choose to cache the data in memory and only flush it to disk when a snapshot is taken. We can double-check this with @gaoyunhaii.
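One possible shape of that alternative, assuming `distinctLabelsState` is initialized in `initializeState` as in the PR (a sketch only; whether it actually performs better is exactly the open question above):

```
@Override
public void processElement(StreamRecord<T> streamRecord) throws Exception {
    // Append straight to the managed ListState; the state backend may keep
    // this in memory and only persist it when a checkpoint snapshot is taken.
    distinctLabelsState.add(streamRecord.getValue());
}

@Override
public void endInput() throws Exception {
    // De-duplicate once at the end of the bounded input instead of on every element.
    Set<T> distinctLabels = new HashSet<>();
    for (T label : distinctLabelsState.get()) {
        distinctLabels.add(label);
    }
    for (T distinctLabel : distinctLabels) {
        output.collect(new StreamRecord<>(distinctLabel));
    }
    distinctLabelsState.clear();
}
```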
########## File path: flink-ml-core/src/main/java/org/apache/flink/ml/common/datastream/DataStreamUtils.java ##########

+    /**
+     * A stream operator to compute the distinct values in each partition of the input bounded data
+     * stream.
+     */
+    static class DistinctPartitionOperator<T> extends AbstractStreamOperator<T>

Review comment: nit: make it `private static class`?