Fix javadoc errors that surface with JDK8
----Release Notes----

[]
-------------
Created by MOE: http://code.google.com/p/moe-java
MOE_MIGRATED_REVID=94149298
kennknowles authored and davorbonaci committed May 21, 2015
1 parent e13bc84 commit 410f253
Showing 12 changed files with 176 additions and 81 deletions.
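Context for the change: JDK8's javadoc runs doclint by default and parses doc comments as HTML, so bare angle-bracket placeholders like <PROJECT ID> now fail as malformed markup. A minimal sketch of the before/after pattern applied throughout this commit (the class names here are illustrative only, not part of the commit):

    // Hypothetical minimal reproduction. Under JDK8, javadoc (doclint on by
    // default) rejects the bare angle brackets below as malformed HTML:

    /**
     * --project=<PROJECT ID>
     * --stagingLocation=gs://<STAGING DIRECTORY>
     */
    class BrokenDoc {}

    // The commit's fix: wrap the literal text in <pre>{@code ...}</pre>, whose
    // contents javadoc does not interpret as HTML, and rename the placeholders:

    /**
     * <pre>{@code
     * --project=YOUR_PROJECT_ID
     * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
     * }</pre>
     */
    class FixedDoc {}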
@@ -76,18 +76,22 @@
  *
  * <p> To execute this pipeline using the Dataflow service in batch mode,
  * specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=DataflowPipelineRunner
  * --inputFile=gs://path/to/input*.txt
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service in streaming mode,
  * specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=DataflowPipelineRunner
- * --inputFile=gs://path/to/input*.txt
+ * --inputFile=gs://YOUR_INPUT_DIRECTORY/*.txt
  * --streaming
+ * }</pre>
  *
  * <p> This will update the datastore every 10 seconds based on the last
  * 30 minutes of data received.
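For readers unfamiliar with where the flags in the hunk above land: in this SDK they are parsed into a typed options object via PipelineOptionsFactory. A minimal sketch, assuming the SDK 1.x API (this main class is illustrative, not part of the commit):

    import com.google.cloud.dataflow.sdk.Pipeline;
    import com.google.cloud.dataflow.sdk.options.DataflowPipelineOptions;
    import com.google.cloud.dataflow.sdk.options.PipelineOptionsFactory;

    public class FlagParsingSketch {
      public static void main(String[] args) {
        // --project, --stagingLocation, --runner, and --streaming are parsed
        // off the command line into a typed options object; --streaming flips
        // the pipeline into the streaming mode described above.
        DataflowPipelineOptions options = PipelineOptionsFactory.fromArgs(args)
            .withValidation()
            .as(DataflowPipelineOptions.class);
        Pipeline p = Pipeline.create(options);
        // ... apply transforms here ...
        p.run();
      }
    }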
@@ -46,19 +46,29 @@
  * table.
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
- * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }
+ * </pre>
+ * and the BigQuery table for the output, with the form
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
+ * }</pre>
  *
- * <p> The BigQuery input table defaults to clouddataflow-readonly:samples.weather_stations and can
- * be overridden with --input.
+ * <p> The BigQuery input table defaults to {@code clouddataflow-readonly:samples.weather_stations}
+ * and can be overridden with {@code --input}.
  */
 public class BigQueryTornadoes {
   // Default to using a 1000 row subset of the public weather station table publicdata:samples.gsod.
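The --input and --output values documented above are plain "PROJECT:DATASET.TABLE" strings handed to BigQueryIO. A hedged sketch of that wiring, assuming the SDK 1.x API; the schema fields and the pass-through shape are assumptions, since the example's intermediate counting transform is elided here:

    import com.google.api.services.bigquery.model.TableFieldSchema;
    import com.google.api.services.bigquery.model.TableRow;
    import com.google.api.services.bigquery.model.TableSchema;
    import com.google.cloud.dataflow.sdk.Pipeline;
    import com.google.cloud.dataflow.sdk.io.BigQueryIO;
    import com.google.cloud.dataflow.sdk.options.PipelineOptionsFactory;
    import com.google.cloud.dataflow.sdk.values.PCollection;

    import java.util.Arrays;

    public class BigQueryIoSketch {
      public static void main(String[] args) {
        Pipeline p = Pipeline.create(
            PipelineOptionsFactory.fromArgs(args).withValidation().create());

        // --input: a "PROJECT:DATASET.TABLE" spec, read row by row.
        PCollection<TableRow> rows = p.apply(
            BigQueryIO.Read.from("clouddataflow-readonly:samples.weather_stations"));

        // The real example computes per-month tornado counts between read and
        // write; that transform is elided here, so the schema below is only a
        // placeholder and must match the TableRows actually written.
        TableSchema schema = new TableSchema().setFields(Arrays.asList(
            new TableFieldSchema().setName("month").setType("INTEGER"),
            new TableFieldSchema().setName("tornado_count").setType("INTEGER")));

        // --output: the same spec form, created if needed.
        rows.apply(BigQueryIO.Write
            .to("YOUR_PROJECT_ID:DATASET_ID.TABLE_ID")
            .withSchema(schema)
            .withCreateDisposition(BigQueryIO.Write.CreateDisposition.CREATE_IF_NEEDED));

        p.run();
      }
    }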
@@ -53,19 +53,29 @@
  * table.
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }
+ * </pre>
  * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
  * --stagingLocation=gs://<STAGING DIRECTORY>
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
+ * }</pre>
  *
- * <p> The BigQuery input table defaults to publicdata:samples.shakespeare and can
- * be overridden with --input.
+ * <p> The BigQuery input table defaults to {@code publicdata:samples.shakespeare} and can
+ * be overridden with {@code --input}.
  */
 public class CombinePerKeyExamples {
   // Use the shakespeare public BigQuery sample
@@ -75,7 +85,7 @@ public class CombinePerKeyExamples {
   private static final int MIN_WORD_LENGTH = 9;
 
   /**
-   * Examines each row in the input table. If the word is >= MIN_WORD_LENGTH,
+   * Examines each row in the input table. If the word is greater than or equal to MIN_WORD_LENGTH,
    * outputs word, play_name.
    */
   static class ExtractLargeWordsFn extends DoFn<TableRow, KV<String, String>> {
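The second hunk above edits the Javadoc of ExtractLargeWordsFn, whose signature is visible in the diff. A sketch of a body consistent with that documentation; the BigQuery column names "word" and "corpus" are assumptions about the Shakespeare sample table, and the class sits inside the enclosing example class as in the diff:

    import com.google.api.services.bigquery.model.TableRow;
    import com.google.cloud.dataflow.sdk.transforms.DoFn;
    import com.google.cloud.dataflow.sdk.values.KV;

    // A member of the enclosing example class, as in the diff above.
    static class ExtractLargeWordsFn extends DoFn<TableRow, KV<String, String>> {
      private static final int MIN_WORD_LENGTH = 9;

      @Override
      public void processElement(ProcessContext c) {
        TableRow row = c.element();
        String word = (String) row.get("word");       // assumed column name
        String playName = (String) row.get("corpus"); // assumed column name
        if (word.length() >= MIN_WORD_LENGTH) {
          c.output(KV.of(word, playName));            // emit (word, play_name)
        }
      }
    }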
@@ -45,19 +45,19 @@
  * data to Datastore, which may incur charge for Datastore operations.
  *
  * <p> To run this example, users need to use gcloud to get credential for Datastore:
- * <pre>
+ * <pre>{@code
  * $ gcloud auth login
- * </pre>
+ * }</pre>
  *
  * <p> Note that the environment variable CLOUDSDK_EXTRA_SCOPES must be set
  * to the same value when executing a Datastore pipeline, as the local auth
  * cache is keyed by the requested scopes.
  *
  * <p> To run this pipeline locally, the following options must be provided:
  * <pre>{@code
- * --project=<PROJECT ID>
- * --dataset=<DATASET ID>
- * --output=[<LOCAL FILE> | gs://<OUTPUT PATH>]
+ * --project=YOUR_PROJECT_ID
+ * --dataset=YOUR_DATASET_ID
+ * --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PATH]
  * }</pre>
  *
  * <p> To run this example using Dataflow service, you must additionally
@@ -32,22 +32,22 @@
  * duplicate lines across all the files. (The output does not preserve any input order).
  *
  * <p> Concepts: the RemoveDuplicates transform, and how to wire transforms together.
- * Demonstrates TextIO.Read/RemoveDuplicates/TextIO.Write.
+ * Demonstrates {@link TextIO.Read}/{@link RemoveDuplicates}/{@link TextIO.Write}.
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
+ * --project=YOUR_PROJECT_ID
  * and a local output file or output prefix on GCS:
- * --output=[<LOCAL FILE> | gs://<OUTPUT PREFIX>]
+ * --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
  * and an output prefix on GCS:
- * --output=gs://<OUTPUT PREFIX>
+ * --output=gs://YOUR_OUTPUT_PREFIX
  *
- * <p> The input defaults to gs://dataflow-samples/shakespeare/* and can be
- * overridden with --input.
+ * <p> The input defaults to {@code gs://dataflow-samples/shakespeare/*} and can be
+ * overridden with {@code --input}.
  */
 public class DeDupExample {
 
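The DeDupExample Javadoc above names the whole pipeline: TextIO.Read into RemoveDuplicates into TextIO.Write. A minimal sketch of that wiring under the SDK 1.x API, with paths taken from the defaults documented above:

    import com.google.cloud.dataflow.sdk.Pipeline;
    import com.google.cloud.dataflow.sdk.io.TextIO;
    import com.google.cloud.dataflow.sdk.options.PipelineOptionsFactory;
    import com.google.cloud.dataflow.sdk.transforms.RemoveDuplicates;

    public class DeDupSketch {
      public static void main(String[] args) {
        Pipeline p = Pipeline.create(
            PipelineOptionsFactory.fromArgs(args).withValidation().create());

        p.apply(TextIO.Read.from("gs://dataflow-samples/shakespeare/*"))  // default --input
         .apply(RemoveDuplicates.<String>create())                        // drop duplicate lines
         .apply(TextIO.Write.to("gs://YOUR_OUTPUT_PREFIX"));              // --output

        p.run();
      }
    }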
@@ -55,23 +55,35 @@
  * table.
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }
+ * </pre>
  * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
  * [--monthFilter=<month_number>]
- * where optional parameter --monthFilter is set to a number 1-12.
+ * }
+ * </pre>
+ * where optional parameter {@code --monthFilter} is set to a number 1-12.
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
  * [--monthFilter=<month_number>]
- * where optional parameter --monthFilter is set to a number 1-12.
+ * }
+ * </pre>
+ * where optional parameter {@code --monthFilter} is set to a number 1-12.
  *
- * <p> The BigQuery input table defaults to clouddataflow-readonly:samples.weather_stations and can
- * be overridden with --input.
+ * <p> The BigQuery input table defaults to {@code clouddataflow-readonly:samples.weather_stations}
+ * and can be overridden with {@code --input}.
  */
 public class FilterExamples {
   // Default to using a 1000 row subset of the public weather station table publicdata:samples.gsod.
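The optional --monthFilter flag documented above is the kind of parameter this SDK usually declares on a custom PipelineOptions interface. A hedged sketch; the interface name, description text, and default value are assumptions:

    import com.google.cloud.dataflow.sdk.options.Default;
    import com.google.cloud.dataflow.sdk.options.Description;
    import com.google.cloud.dataflow.sdk.options.PipelineOptions;

    // Hypothetical options interface; the @Default value is an assumption.
    public interface MonthFilterOptions extends PipelineOptions {
      @Description("Numeric month (1-12) to filter on")
      @Default.Integer(7)
      Integer getMonthFilter();
      void setMonthFilter(Integer value);
    }

Passing --monthFilter=10 on the command line then populates getMonthFilter() when the options are built with PipelineOptionsFactory.fromArgs(args).withValidation().as(MonthFilterOptions.class).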
@@ -41,16 +41,26 @@
  * <p> Concepts: Join operation; multiple input sources.
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }
+ * </pre>
  * and a local output file or output prefix on GCS:
- * --output=[<LOCAL FILE> | gs://<OUTPUT PREFIX>]
+ * <pre>{@code
+ * --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and an output prefix on GCS:
- * --output=gs://<OUTPUT PREFIX>
+ * <pre>{@code
+ * --output=gs://YOUR_OUTPUT_PREFIX
+ * }</pre>
  */
 public class JoinExamples {
 
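JoinExamples' "Join operation; multiple input sources" concept is typically realized with CoGroupByKey over tagged inputs. A sketch assuming the SDK 1.x join API; the element types, tag names, and class name are illustrative:

    import com.google.cloud.dataflow.sdk.transforms.join.CoGbkResult;
    import com.google.cloud.dataflow.sdk.transforms.join.CoGroupByKey;
    import com.google.cloud.dataflow.sdk.transforms.join.KeyedPCollectionTuple;
    import com.google.cloud.dataflow.sdk.values.KV;
    import com.google.cloud.dataflow.sdk.values.PCollection;
    import com.google.cloud.dataflow.sdk.values.TupleTag;

    public class JoinSketch {
      // Group two keyed collections under one key space; downstream code reads
      // each side out of the CoGbkResult via its tag.
      static PCollection<KV<String, CoGbkResult>> join(
          PCollection<KV<String, String>> left,
          PCollection<KV<String, String>> right,
          TupleTag<String> leftTag, TupleTag<String> rightTag) {
        return KeyedPCollectionTuple.of(leftTag, left)
            .and(rightTag, right)
            .apply(CoGroupByKey.<String>create());
      }
    }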
@@ -47,19 +47,29 @@
  * table.
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
- * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }
+ * </pre>
+ * and the BigQuery table for the output, with the form
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and the BigQuery table for the output:
- * --output=<project_id>:<dataset_id>.<table_id>
+ * <pre>{@code
+ * --output=YOUR_PROJECT_ID:DATASET_ID.TABLE_ID
+ * }</pre>
  *
- * <p> The BigQuery input table defaults to clouddataflow-readonly:samples.weather_stations and can
- * be overridden with --input.
+ * <p> The BigQuery input table defaults to {@code clouddataflow-readonly:samples.weather_stations }
+ * and can be overridden with {@code --input}.
  */
 public class MaxPerKeyExamples {
   // Default to using a 1000 row subset of the public weather station table publicdata:samples.gsod.
@@ -67,18 +67,25 @@
  * <p> Concepts: joining data; side inputs; logging
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }</pre>
  * and a local output file or output prefix on GCS:
- * --output=[<LOCAL FILE> | gs://<OUTPUT PREFIX>]
+ * <pre>{@code
+ * --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
  * and an output prefix on GCS:
- * --output=gs://<OUTPUT PREFIX>
+ * --output=gs://YOUR_OUTPUT_PREFIX
+ * }</pre>
  *
- * <p> The default input is gs://dataflow-samples/shakespeare/ and can be overridden with --input.
+ * <p> The default input is {@code gs://dataflow-samples/shakespeare/} and can be overridden with
+ * {@code --input}.
  */
 public class TfIdf {
   /**
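TfIdf's Javadoc lists "side inputs" among its concepts. A sketch of the mechanism under the SDK 1.x API; the singleton view of a document count is a hypothetical stand-in for whatever TfIdf actually broadcasts:

    import com.google.cloud.dataflow.sdk.transforms.DoFn;
    import com.google.cloud.dataflow.sdk.transforms.ParDo;
    import com.google.cloud.dataflow.sdk.transforms.View;
    import com.google.cloud.dataflow.sdk.values.PCollection;
    import com.google.cloud.dataflow.sdk.values.PCollectionView;

    public class SideInputSketch {
      static PCollection<String> tagWithTotal(
          PCollection<String> words, PCollection<Long> totalDocuments) {
        // Materialize the count as a singleton view, readable per element.
        final PCollectionView<Long> total =
            totalDocuments.apply(View.<Long>asSingleton());
        return words.apply(ParDo.withSideInputs(total).of(
            new DoFn<String, String>() {
              @Override
              public void processElement(ProcessContext c) {
                long n = c.sideInput(total);  // read the side input per element
                c.output(c.element() + " (of " + n + " docs)");
              }
            }));
      }
    }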
@@ -55,19 +55,27 @@
  * data.
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and an output prefix on GCS:
- * --output=gs://<OUTPUT PREFIX>
+ * <pre>{@code
+ * --output=gs://YOUR_OUTPUT_PREFIX
+ * }</pre>
  *
- * <p> The default input is gs://dataflow-samples/wikipedia_edits/*.json and can be overridden with
- * --input.
+ * <p> The default input is {@code gs://dataflow-samples/wikipedia_edits/*.json} and can be
+ * overridden with {@code --input}.
  *
  * <p> The input for this example is large enough that it's a good place to enable (experimental)
  * autoscaling:
+ * <pre>{@code
  * --autoscalingAlgorithm=BASIC
  * --maxNumWorkers=20
+ * }
+ * </pre>
  * This will automatically scale the number of workers up over time until the job completes.
  */
 public class TopWikipediaSessions {
@@ -40,22 +40,34 @@
 /**
  * An example that counts words in Shakespeare. For a detailed walkthrough of this
  * example see:
+ * <a href="https://cloud.google.com/dataflow/java-sdk/wordcount-example">
  * https://cloud.google.com/dataflow/java-sdk/wordcount-example
+ * </a>
  *
  * <p> To execute this pipeline locally, specify general pipeline configuration:
- * --project=<PROJECT ID>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * }
+ * </pre>
  * and a local output file or output prefix on GCS:
- * --output=[<LOCAL FILE> | gs://<OUTPUT PREFIX>]
+ * <pre>{@code
+ * --output=[YOUR_LOCAL_FILE | gs://YOUR_OUTPUT_PREFIX]
+ * }</pre>
  *
  * <p> To execute this pipeline using the Dataflow service, specify pipeline configuration:
- * --project=<PROJECT ID>
- * --stagingLocation=gs://<STAGING DIRECTORY>
+ * <pre>{@code
+ * --project=YOUR_PROJECT_ID
+ * --stagingLocation=gs://YOUR_STAGING_DIRECTORY
  * --runner=BlockingDataflowPipelineRunner
+ * }
+ * </pre>
  * and an output prefix on GCS:
- * --output=gs://<OUTPUT PREFIX>
+ * <pre>{@code
+ * --output=gs://YOUR_OUTPUT_PREFIX
+ * }</pre>
  *
- * <p> The input file defaults to gs://dataflow-samples/shakespeare/kinglear.txt and can be
- * overridden with --input.
+ * <p> The input file defaults to {@code gs://dataflow-samples/shakespeare/kinglear.txt} and can be
+ * overridden with {@code --input}.
  */
 public class WindowingWordCount {
 
(Diff for the twelfth changed file did not load and is not shown.)
