[ https://issues.apache.org/jira/browse/FLINK-5658?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15926463#comment-15926463 ]
ASF GitHub Bot commented on FLINK-5658:
---------------------------------------

Github user fhueske commented on a diff in the pull request:

    https://github.com/apache/flink/pull/3386#discussion_r106203953

--- Diff: flink-libraries/flink-table/src/test/java/org/apache/flink/table/api/java/stream/sql/UnboundedEventTimeOverProcessFuncTest.java ---
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.flink.table.api.java.stream.sql;
+
+
+import org.apache.flink.api.common.typeinfo.BasicTypeInfo;
+import org.apache.flink.api.common.typeinfo.TypeInformation;
+import org.apache.flink.api.java.functions.KeySelector;
+import org.apache.flink.api.java.tuple.Tuple;
+import org.apache.flink.api.java.tuple.Tuple1;
+import org.apache.flink.api.java.typeutils.RowTypeInfo;
+import org.apache.flink.api.java.typeutils.TupleTypeInfo;
+import org.apache.flink.streaming.api.operators.KeyedProcessOperator;
+import org.apache.flink.streaming.api.operators.ProcessOperator;
+import org.apache.flink.streaming.api.watermark.Watermark;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.flink.streaming.runtime.tasks.OperatorStateHandles;
+import org.apache.flink.streaming.util.KeyedOneInputStreamOperatorTestHarness;
+import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;
+import org.apache.flink.streaming.util.TestHarnessUtil;
+import org.apache.flink.table.functions.AggregateFunction;
+import org.apache.flink.table.functions.aggfunctions.IntSumAggFunction;
+import org.apache.flink.table.runtime.aggregate.UnboundedEventTimeOverProcessFunction;
+import org.apache.flink.types.Row;
+import org.apache.flink.util.TestLogger;
+import org.junit.Test;
+
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+
+/**
+ * Tests {@link ProcessOperator}.
+ */
+public class UnboundedEventTimeOverProcessFuncTest extends TestLogger {
+
+	@Test
+	public void testUnboundedEventSnapshotAndRestore() throws Exception {
+
+		AggregateFunction[] aggFunc = new AggregateFunction[1];
+		aggFunc[0] = new IntSumAggFunction();
+		int[] aggField = new int[1];
+		aggField[0] = 0;
+
+		TypeInformation<Row> returnType = new RowTypeInfo(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO);
+		TypeInformation<Row> interMediateType = new RowTypeInfo(BasicTypeInfo.INT_TYPE_INFO, aggFunc[0].getAccumulatorType());
+		KeySelector<Row, Tuple> keyselector = new KeySelector<Row, Tuple>() {
+			@Override
+			public Tuple getKey(Row value) throws Exception {
+				return new Tuple1<Integer>(1);
+			}
+		};
+
+		KeyedProcessOperator<Tuple, Row, Row> operator =
+			new KeyedProcessOperator<>(
+				new UnboundedEventTimeOverProcessFunction(
+					aggFunc, aggField, 1, interMediateType, keyselector,
+					new TupleTypeInfo<Tuple>(BasicTypeInfo.INT_TYPE_INFO)));
+
+		OneInputStreamOperatorTestHarness<Row, Row> testHarness =
+			new KeyedOneInputStreamOperatorTestHarness<>(
+				operator, keyselector, new TupleTypeInfo<Tuple>(BasicTypeInfo.INT_TYPE_INFO));
+
+		testHarness.setup();
+		testHarness.open();
+
+		Row inputRow = new Row(1);
+		inputRow.setField(0, 1);
+		testHarness.processElement(new StreamRecord<>(inputRow, 12L));
+		testHarness.processElement(new StreamRecord<>(inputRow, 12L));
+		testHarness.processElement(new StreamRecord<>(inputRow, 12L));
+
+		ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
+		Row res = new Row(2);
--- End diff --

Use `Row.of()` to generate rows more concisely.


> Add event time OVER RANGE BETWEEN UNBOUNDED PRECEDING aggregation to SQL
> -------------------------------------------------------------------------
>
>                 Key: FLINK-5658
>                 URL: https://issues.apache.org/jira/browse/FLINK-5658
>             Project: Flink
>          Issue Type: Sub-task
>          Components: Table API & SQL
>            Reporter: Fabian Hueske
>            Assignee: Yuhong Hong
>
> The goal of this issue is to add support for OVER RANGE aggregations on event time streams to the SQL interface.
> Queries similar to the following should be supported:
> {code}
> SELECT
>   a,
>   SUM(b) OVER (PARTITION BY c ORDER BY rowTime() RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS sumB,
>   MIN(b) OVER (PARTITION BY c ORDER BY rowTime() RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS minB
> FROM myStream
> {code}
> The following restrictions should initially apply:
> - All OVER clauses in the same SELECT clause must be exactly the same.
> - The PARTITION BY clause is optional (no partitioning results in single-threaded execution).
> - The ORDER BY clause may only have rowTime() as parameter. rowTime() is a parameterless scalar function that just indicates event time mode.
> - Bounded PRECEDING is not supported (see FLINK-5655).
> - FOLLOWING is not supported.
> The restrictions will be resolved in follow-up issues. If we find that some of the restrictions are trivial to address, we can add the functionality in this issue as well.
> This issue includes:
> - Design of the DataStream operator to compute OVER RANGE aggregates
> - Translation from Calcite's RelNode representation (LogicalProject with RexOver expression).



--
This message was sent by Atlassian JIRA
(v6.3.15#6346)
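As a minimal illustration of the `Row.of()` suggestion in the review comment above: the expected-output rows can be built with the static factory method on `org.apache.flink.types.Row` instead of `new Row(2)` followed by `setField(...)` calls. This is only a sketch; the class name and the concrete field values below are hypothetical, since the remainder of the test body is truncated in the quoted diff.

{code}
import org.apache.flink.types.Row;

public class RowOfSketch {

	public static void main(String[] args) {
		// Verbose construction, as written in the test under review:
		// create an arity-2 row and fill each field individually.
		Row verbose = new Row(2);
		verbose.setField(0, 1);
		verbose.setField(1, 1);

		// Concise construction via the factory method suggested by the reviewer.
		// Row.of(...) creates a row whose arity equals the number of arguments.
		Row concise = Row.of(1, 1);

		// Both rows carry the same two fields; the values (1, 1) are
		// illustrative only and are not taken from the truncated test above.
		System.out.println(verbose + " / " + concise);
	}
}
{code}

Besides being shorter, the factory form keeps the row arity and the field values in a single expression, which avoids mismatches between the declared arity and the number of `setField` calls.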