Commit

[hotfix] Fix many many typos
Fix typos from the IntelliJ "Typos" inspection.

This closes apache#5242
greghogan authored and StephanEwen committed Jan 5, 2018
1 parent 0ae70ba commit 3bc293e
Showing 181 changed files with 328 additions and 329 deletions.

@@ -1164,7 +1164,7 @@ public CustomCommandLine getActiveCustomCommandLine(CommandLine commandLine) {

/**
* Retrieves the loaded custom command-lines.
* @return An unmodifiyable list of loaded custom command-lines.
* @return An unmodifiable list of loaded custom command-lines.
*/
public static List<CustomCommandLine<?>> getCustomCommandLineList() {
return Collections.unmodifiableList(customCommandLines);

@@ -142,7 +142,7 @@ public void setDelayMillis(long delayMillis) {
/** The user specified config map that we forward to Elasticsearch when we create the {@link Client}. */
private final Map<String, String> userConfig;

/** The function that is used to construct mulitple {@link ActionRequest ActionRequests} from each incoming element. */
/** The function that is used to construct multiple {@link ActionRequest ActionRequests} from each incoming element. */
private final ElasticsearchSinkFunction<T> elasticsearchSinkFunction;

/** User-provided handler for failed {@link ActionRequest ActionRequests}. */

@@ -67,7 +67,7 @@ public void cancel() {
}

/**
* A {@link ElasticsearchSinkFunction} that indexes each element it receives to a sepecified Elasticsearch index.
* A {@link ElasticsearchSinkFunction} that indexes each element it receives to a specified Elasticsearch index.
*/
public static class TestElasticsearchSinkFunction implements ElasticsearchSinkFunction<Tuple2<Integer, String>> {
private static final long serialVersionUID = 1L;
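For context on the interface touched in this hunk: an ElasticsearchSinkFunction builds one or more ActionRequests from each incoming element and hands them to a RequestIndexer. Below is a minimal sketch of such a function, assuming the Flink 1.4-era connector API and the Requests helper of the older Elasticsearch clients; the index and type names are placeholders.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Requests;

/** Indexes each incoming String into a fixed (placeholder) index and type. */
public class SimpleIndexingSinkFunction implements ElasticsearchSinkFunction<String> {
	private static final long serialVersionUID = 1L;

	@Override
	public void process(String element, RuntimeContext ctx, RequestIndexer indexer) {
		Map<String, String> json = new HashMap<>();
		json.put("data", element);

		IndexRequest request = Requests.indexRequest()
				.index("my-index")   // placeholder index name
				.type("my-type")     // placeholder type name
				.source(json);

		// A single element may yield several requests; this sketch emits exactly one.
		indexer.add(request);
	}
}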

@@ -170,7 +170,7 @@ public enum Semantic {
public static final String KEY_DISABLE_METRICS = "flink.disable-metrics";

/**
* Descriptor of the transacionalIds list.
* Descriptor of the transactional IDs list.
*/
private static final ListStateDescriptor<NextTransactionalIdHint> NEXT_TRANSACTIONAL_ID_HINT_DESCRIPTOR =
new ListStateDescriptor<>("next-transactional-id-hint", TypeInformation.of(NextTransactionalIdHint.class));

@@ -101,7 +101,7 @@ public void testRestoreToCheckpointAfterExceedingProducersPool() throws Exceptio
assertIsCausedBy(FlinkKafka011ErrorCode.PRODUCERS_POOL_EMPTY, ex);
}

// Resume transactions before testHrness1 is being closed (in case of failures close() might not be called)
// Resume transactions before testHarness1 is being closed (in case of failures close() might not be called)
try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness2 = createTestHarness(topic)) {
testHarness2.setup();
// restore from snapshot1, transactions with records 43 and 44 should be aborted

@@ -75,7 +75,7 @@ public FlinkKafkaProducer08(String topicId, SerializationSchema<IN> serializatio
* @param topicId The topic to write data to
* @param serializationSchema A (keyless) serializable serialization schema for turning user objects into a kafka-consumable byte[]
* @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
* @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
* @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
*/
public FlinkKafkaProducer08(String topicId, SerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
this(topicId, new KeyedSerializationSchemaWrapper<>(serializationSchema), producerConfig, customPartitioner);

@@ -120,7 +120,7 @@ public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema<IN> seriali
* @param topicId The topic to write data to
* @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
* @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
* @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
* @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
*/
public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
super(topicId, serializationSchema, producerConfig, customPartitioner);

@@ -134,7 +134,7 @@ public FlinkKafkaProducer08(String topicId, KeyedSerializationSchema<IN> seriali
* @param topicId The topic to write data to
* @param serializationSchema A (keyless) serializable serialization schema for turning user objects into a kafka-consumable byte[]
* @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
* @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
* @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
*
* @deprecated This is a deprecated constructor that does not correctly handle partitioning when
* producing to multiple topics. Use

@@ -151,7 +151,7 @@ public FlinkKafkaProducer08(String topicId, SerializationSchema<IN> serializatio
* @param topicId The topic to write data to
* @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
* @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
* @param customPartitioner A serializable partitioner for assining messages to Kafka partitions.
* @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
*
* @deprecated This is a deprecated constructor that does not correctly handle partitioning when
* producing to multiple topics. Use
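The hunks above all touch the Javadoc of the same constructor family. For orientation, here is a minimal sketch of wiring such a producer into a job using the non-deprecated (topic, schema, config, partitioner) constructor; the package names, SimpleStringSchema, FlinkFixedPartitioner, topic name, and broker address follow the 1.4-era connector and are assumptions of this sketch.

import java.util.Properties;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer08;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner;
import org.apache.flink.streaming.util.serialization.SimpleStringSchema;

public class KafkaProducerSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		DataStream<String> lines = env.fromElements("a", "b", "c");

		// 'bootstrap.servers' is the only required producer property.
		Properties producerConfig = new Properties();
		producerConfig.setProperty("bootstrap.servers", "localhost:9092");

		// Keyless variant: the SerializationSchema turns each element into a Kafka-consumable byte[].
		// FlinkFixedPartitioner is just one choice of partitioner for assigning messages to partitions.
		FlinkKafkaProducer08<String> producer = new FlinkKafkaProducer08<>(
				"my-topic", new SimpleStringSchema(), producerConfig, new FlinkFixedPartitioner<String>());

		lines.addSink(producer);
		env.execute("Kafka producer sketch");
	}
}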

@@ -156,7 +156,7 @@ protected abstract static class Builder<T extends KafkaAvroTableSource, B extend
private Map<String, String> fieldMapping;

/**
* Sets the class of the Avro records that aree read from the Kafka topic.
* Sets the class of the Avro records that are read from the Kafka topic.
*
* @param avroClass The class of the Avro records that are read from the Kafka topic.
* @return The builder.

@@ -313,7 +313,7 @@ public B withSchema(TableSchema schema) {

/**
* Configures a field of the table to be a processing time attribute.
* The configured field must be present in the tabel schema and of type {@link Types#SQL_TIMESTAMP()}.
* The configured field must be present in the table schema and of type {@link Types#SQL_TIMESTAMP()}.
*
* @param proctimeAttribute The name of the processing time attribute in the table schema.
* @return The builder.

@@ -328,7 +328,7 @@ public B withProctimeAttribute(String proctimeAttribute) {

/**
* Configures a field of the table to be a rowtime attribute.
* The configured field must be present in the tabel schema and of type {@link Types#SQL_TIMESTAMP()}.
* The configured field must be present in the table schema and of type {@link Types#SQL_TIMESTAMP()}.
*
* @param rowtimeAttribute The name of the rowtime attribute in the table schema.
* @param timestampExtractor The {@link TimestampExtractor} to extract the rowtime attribute from the physical type.

@@ -29,7 +29,7 @@ public class OffsetCommitModes {
* @param enableCommitOnCheckpoint whether or not committing on checkpoints is enabled.
* @param enableCheckpointing whether or not checkpoint is enabled for the consumer.
*
* @return the offset commmit mode to use, based on the configuration values.
* @return the offset commit mode to use, based on the configuration values.
*/
public static OffsetCommitMode fromConfiguration(
boolean enableAutoCommit,

@@ -70,7 +70,7 @@ public ClosableBlockingQueue() {

/**
* Creates a new empty queue, reserving space for at least the specified number
* of elements. The queu can still grow, of more elements are added than the
* of elements. The queue can still grow, of more elements are added than the
* reserved space.
*
* @param initialSize The number of elements to reserve space for.

@@ -189,7 +189,7 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable {
* Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
* it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
*
* <p>Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds.
* <p>Note that this test does not test the snapshot method is blocked correctly when there are pending records.
* The test for that is covered in testAtLeastOnceProducer.
*/
@SuppressWarnings("unchecked")

@@ -406,7 +406,7 @@ public void runStartFromLatestOffsets() throws Exception {
final String consumeExtraRecordsJobName = "Consume Extra Records Job";
final String writeExtraRecordsJobName = "Write Extra Records Job";

// seriliazation / deserialization schemas for writing and consuming the extra records
// serialization / deserialization schemas for writing and consuming the extra records
final TypeInformation<Tuple2<Integer, Integer>> resultType =
TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});

@@ -58,7 +58,7 @@
* <p>NOTE:
* In the AWS KCL library, there is a similar implementation - {@link com.amazonaws.services.kinesis.clientlibrary.proxies.KinesisProxy}.
* This implementation differs mainly in that we can make operations to arbitrary Kinesis streams, which is a needed
* functionality for the Flink Kinesis Connecter since the consumer may simultaneously read from multiple Kinesis streams.
* functionality for the Flink Kinesis Connector since the consumer may simultaneously read from multiple Kinesis streams.
*/
public class KinesisProxy implements KinesisProxyInterface {

@@ -167,7 +167,7 @@ public void testAsyncErrorRethrownOnCheckpoint() throws Throwable {
* Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
* it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
*
* <p>Note that this test does not test the snapshot method is blocked correctly when there are pending recorrds.
* <p>Note that this test does not test the snapshot method is blocked correctly when there are pending records.
* The test for that is covered in testAtLeastOnceProducer.
*/
@SuppressWarnings("ResultOfMethodCallIgnored")

@@ -355,7 +355,7 @@ public Builder setPassword(String password) {
/**
* Convenience method for setting the fields in an AMQP URI: host,
* port, username, password and virtual host. If any part of the
* URI is ommited, the ConnectionFactory's corresponding variable
* URI is omitted, the ConnectionFactory's corresponding variable
* is left unchanged.
* @param uri is the AMQP URI containing the data
* @return the Builder

@@ -53,7 +53,7 @@
*
* @param <K> Type of key
* @param <V> Type of value
* @param <T> The type iself
* @param <T> The type itself
*/
@Internal
public abstract class HadoopInputFormatBase<K, V, T> extends HadoopInputFormatCommonBase<T, HadoopInputSplit> {

@@ -51,7 +51,7 @@
import java.util.LinkedList;

/**
* Integraiton tests for Hadoop IO formats.
* Integration tests for Hadoop IO formats.
*/
@RunWith(Parameterized.class)
public class HadoopIOFormatsITCase extends JavaProgramTestBase {

@@ -51,7 +51,7 @@ public abstract class TableInputFormat<T extends Tuple> extends AbstractTableInp
* The output from HBase is always an instance of {@link Result}.
* This method is to copy the data in the Result instance into the required {@link Tuple}
* @param r The Result instance from HBase that needs to be converted
* @return The approriate instance of {@link Tuple} that contains the needed information.
* @return The appropriate instance of {@link Tuple} that contains the needed information.
*/
protected abstract T mapResultToTuple(Result r);

@@ -312,7 +312,7 @@ public long eval(byte[] bytes) {
}
}

// ######## TableInputFormate tests ############
// ######## TableInputFormat tests ############

class InputFormatForTestTable extends TableInputFormat<Tuple1<Integer>> {

2 changes: 1 addition & 1 deletion flink-contrib/docker-flink/create-docker-swarm-service.sh
@@ -50,5 +50,5 @@ docker network create -d overlay ${OVERLAY_NETWORK_NAME}
# Create the jobmanager service
docker service create --name ${JOB_MANAGER_NAME} --env JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS} -p ${SERVICE_PORT}:8081 --network ${OVERLAY_NETWORK_NAME} ${IMAGE_NAME} jobmanager

# Create the taskmanger service (scale this out as needed)
# Create the taskmanager service (scale this out as needed)
docker service create --name ${TASK_MANAGER_NAME} --env JOB_MANAGER_RPC_ADDRESS=${JOB_MANAGER_RPC_ADDRESS} --network ${OVERLAY_NETWORK_NAME} ${IMAGE_NAME} taskmanager

@@ -71,7 +71,7 @@ public void testWikipediaEditsSource() throws Exception {

// Execute the source in a different thread and collect events into the queue.
// We do this in a separate thread in order to not block the main test thread
// indefinitely in case that somethign bad happens (like not receiving any
// indefinitely in case that something bad happens (like not receiving any
// events)
executorService.execute(() -> {
try {

@@ -82,7 +82,7 @@ public class RocksDBStateBackend extends AbstractStateBackend {
/** The state backend that we use for creating checkpoint streams. */
private final AbstractStateBackend checkpointStreamBackend;

/** Operator identifier that is used to uniqueify the RocksDB storage path. */
/** Operator identifier that is used to uniquify the RocksDB storage path. */
private String operatorIdentifier;

/** JobID for uniquifying backup paths. */

@@ -202,7 +202,7 @@ public RocksDBStateBackend(AbstractStateBackend checkpointStreamBackend) {
* {@link AbstractStateBackend#createStreamFactory(JobID, String) checkpoint stream}.
*
* @param checkpointStreamBackend The backend to store the
* @param enableIncrementalCheckpointing True if incremental checkponting is enabled
* @param enableIncrementalCheckpointing True if incremental checkpointing is enabled
*/
public RocksDBStateBackend(AbstractStateBackend checkpointStreamBackend, boolean enableIncrementalCheckpointing) {
this.checkpointStreamBackend = requireNonNull(checkpointStreamBackend);
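As a rough illustration of the constructor documented above, the two-argument variant pairs RocksDB with a checkpoint-stream backend and switches on incremental checkpointing. The FsStateBackend, the checkpoint path, and the interval below are assumptions for the sketch.

import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RocksDBBackendSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Checkpoint streams go to a file system; RocksDB holds the working state locally.
		FsStateBackend checkpointStreamBackend = new FsStateBackend("file:///tmp/flink-checkpoints");

		// The second argument enables incremental checkpointing.
		RocksDBStateBackend backend = new RocksDBStateBackend(checkpointStreamBackend, true);

		env.setStateBackend(backend);
		env.enableCheckpointing(10_000); // checkpoint every 10 seconds
	}
}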

@@ -36,7 +36,7 @@
* <p>The input is a plain text file with lines separated by newline characters.
*
* <p>Usage:
* <code>ExclamationWithmBolt &lt;text path&gt; &lt;result path&gt; &lt;number of exclamation marks&gt;</code><br>
* <code>ExclamationWithBolt &lt;text path&gt; &lt;result path&gt; &lt;number of exclamation marks&gt;</code><br>
* If no parameters are provided, the program is run with default data from {@link WordCountData} with x=2.
*
* <p>This example shows how to:

@@ -23,7 +23,7 @@
import org.apache.flink.test.testdata.WordCountData;

/**
* Test for the ExclamantionWithSpout example.
* Test for the ExclamationWithSpout example.
*/
public class ExclamationWithSpoutITCase extends StreamingProgramTestBase {

@@ -77,7 +77,7 @@ protected void postSubmit() throws Exception {
Collections.sort(expectedResults);
System.out.println(actualResults);
for (int i = 0; i < actualResults.size(); ++i) {
//compare against actual results with removed prefex (as it depends e.g. on the hash function used)
//compare against actual results with removed prefix (as it depends e.g. on the hash function used)
Assert.assertEquals(expectedResults.get(i), actualResults.get(i));
}
}

@@ -156,7 +156,7 @@ public FlinkClient getClient() {
return this;
}

// The following methods are derived from "backtype.storm.generated.Nimubs.Client"
// The following methods are derived from "backtype.storm.generated.Nimbus.Client"

/**
* Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support