wuchong commented on a change in pull request #9366: [FLINK-13359][docs] Add documentation for DDL introduction URL: https://github.com/apache/flink/pull/9366#discussion_r314658244
########## File path: docs/dev/table/connect.md ########## @@ -753,6 +815,70 @@ connector: sink-partitioner-class: org.mycompany.MyPartitioner # optional: used in case of sink partitioner custom {% endhighlight %} </div> + +<div data-lang="DDL" markdown="1"> +{% highlight sql %} +-- CREATE a Kafka 0.11 table that starts from the earliest offset (as table source) +-- and uses append mode (as table sink). +create table MyUserTable ( + user bigint, + message string, + ts string +) with ( + 'connector.type' = 'kafka', + + 'connector.version' = '0.11', -- required: valid connector versions are + -- "0.8", "0.9", "0.10", "0.11", and "universal" + + 'connector.topic' = 'topic_name', -- required: topic name from which the table is read + + 'update-mode' = 'append', -- required: update mode when used as table sink; + -- only append mode is supported for now. + + 'format.type' = 'avro', -- required: specify which format to deserialize (as table source) + -- and serialize (as table sink). + -- Valid format types are: "csv", "json", "avro". 
+ + 'connector.properties.0.key' = 'zookeeper.connect', -- optional: connector specific properties + 'connector.properties.0.value' = 'localhost:2181', + 'connector.properties.1.key' = 'bootstrap.servers', + 'connector.properties.1.value' = 'localhost:9092', + 'connector.properties.2.key' = 'group.id', + 'connector.properties.2.value' = 'testGroup', + 'connector.startup-mode' = 'earliest-offset', -- optional: valid modes are "earliest-offset", "latest-offset", + -- "group-offsets", or "specific-offsets" + + 'connector.specific-offsets.0.partition' = '0', -- optional: used in case of startup mode with specific offsets + 'connector.specific-offsets.0.offset' = '42', + 'connector.specific-offsets.1.partition' = '1', + 'connector.specific-offsets.1.offset' = '300', + + 'connector.sink-partitioner' = '...', -- optional: output partitioning from Flink's partitions + -- into Kafka's partitions. Valid values are "fixed" + -- (each Flink partition ends up in at most one Kafka partition), + -- "round-robin" (a Flink partition is distributed to + -- Kafka partitions round-robin), + -- "custom" (use a custom FlinkKafkaPartitioner subclass) + + 'format.derive-schema' = 'true', -- optional: derive the serialize/deserialize format Review comment: Remove this. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services