From 786a6cbb37d34ca918d178c36118dd2e142eacda Mon Sep 17 00:00:00 2001 From: Xingcan Cui Date: Tue, 10 Oct 2017 21:15:02 +0800 Subject: [PATCH] [FLINK-7764][kafka]FlinkKafkaProducer010 does not accept name, uid, or parallelism --- .../kafka/FlinkKafkaProducer010.java | 57 ++++++++++++++++++- 1 file changed, 54 insertions(+), 3 deletions(-) diff --git a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java index 3b43a7efc4188..857526823692d 100644 --- a/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java +++ b/flink-connectors/flink-connector-kafka-0.10/src/main/java/org/apache/flink/streaming/connectors/kafka/FlinkKafkaProducer010.java @@ -20,6 +20,8 @@ import org.apache.flink.streaming.api.datastream.DataStream; import org.apache.flink.streaming.api.datastream.DataStreamSink; import org.apache.flink.streaming.api.functions.sink.SinkFunction; +import org.apache.flink.streaming.api.operators.ChainingStrategy; +import org.apache.flink.streaming.api.transformations.SinkTransformation; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaDelegatePartitioner; import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner; @@ -320,18 +322,23 @@ record = new ProducerRecord<>(targetTopic, flinkKafkaPartitioner.partition(value * Configuration object returned by the writeToKafkaWithTimestamps() call. * *

This is only kept because it's part of the public API. It is not necessary anymore, now - * that the {@link SinkFunction} interface provides timestamps. + * that the {@link SinkFunction} interface provides timestamps.

+ * + *

To enable these settings, this fake sink must override all the public methods + * in {@link DataStreamSink}.

*/ public static class FlinkKafkaProducer010Configuration extends DataStreamSink { private final FlinkKafkaProducer010 producer; + private final SinkTransformation transformation; private FlinkKafkaProducer010Configuration( - DataStreamSink originalSink, + DataStreamSink originalSink, DataStream inputStream, FlinkKafkaProducer010 producer) { //noinspection unchecked super(inputStream, originalSink.getTransformation().getOperator()); + this.transformation = originalSink.getTransformation(); this.producer = producer; } @@ -367,6 +374,50 @@ public void setFlushOnCheckpoint(boolean flush) { public void setWriteTimestampToKafka(boolean writeTimestampToKafka) { producer.writeTimestampToKafka = writeTimestampToKafka; } - } + // ************************************************************************* + // Override methods to use the transformation in this class. + // ************************************************************************* + + @Override + public SinkTransformation getTransformation() { + return transformation; + } + + @Override + public DataStreamSink name(String name) { + transformation.setName(name); + return this; + } + + @Override + public DataStreamSink uid(String uid) { + transformation.setUid(uid); + return this; + } + + @Override + public DataStreamSink setUidHash(String uidHash) { + transformation.setUidHash(uidHash); + return this; + } + + @Override + public DataStreamSink setParallelism(int parallelism) { + transformation.setParallelism(parallelism); + return this; + } + + @Override + public DataStreamSink disableChaining() { + this.transformation.setChainingStrategy(ChainingStrategy.NEVER); + return this; + } + + @Override + public DataStreamSink slotSharingGroup(String slotSharingGroup) { + transformation.setSlotSharingGroup(slotSharingGroup); + return this; + } + } }