diff --git a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java index 61bafc8380049..1008c67de3491 100644 --- a/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java +++ b/common/network-common/src/main/java/org/apache/spark/network/client/TransportClientFactory.java @@ -194,8 +194,8 @@ private TransportClient createClient(InetSocketAddress address) throws IOExcepti .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs()) .option(ChannelOption.ALLOCATOR, pooledAllocator); - final AtomicReference clientRef = new AtomicReference(); - final AtomicReference channelRef = new AtomicReference(); + final AtomicReference clientRef = new AtomicReference<>(); + final AtomicReference channelRef = new AtomicReference<>(); bootstrap.handler(new ChannelInitializer() { @Override diff --git a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java index f97e76d7ed0d9..7a114df2d6857 100644 --- a/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java +++ b/core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java @@ -84,9 +84,9 @@ final class ShuffleExternalSorter extends MemoryConsumer { * this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager * itself). */ - private final LinkedList allocatedPages = new LinkedList(); + private final LinkedList allocatedPages = new LinkedList<>(); - private final LinkedList spills = new LinkedList(); + private final LinkedList spills = new LinkedList<>(); /** Peak memory used by this sorter so far, in bytes. **/ private long peakMemoryUsedBytes; diff --git a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java index f19ed01d5aebf..0cf84d5f9b716 100644 --- a/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java +++ b/core/src/main/java/org/apache/spark/status/api/v1/TaskSorting.java @@ -29,7 +29,7 @@ public enum TaskSorting { private final Set alternateNames; private TaskSorting(String... 
names) { - alternateNames = new HashSet(); + alternateNames = new HashSet<>(); for (String n: names) { alternateNames.add(n); } diff --git a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java index 1692df7d30f0d..3e47bfc274cb1 100644 --- a/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java +++ b/core/src/test/java/org/apache/spark/launcher/SparkLauncherSuite.java @@ -88,7 +88,7 @@ public void testSparkArgumentHandling() throws Exception { @Test public void testChildProcLauncher() throws Exception { SparkSubmitOptionParser opts = new SparkSubmitOptionParser(); - Map env = new HashMap(); + Map env = new HashMap<>(); env.put("SPARK_PRINT_LAUNCH_COMMAND", "1"); SparkLauncher launcher = new SparkLauncher(env) diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java index add9d937d387b..ddea6f5a69b18 100644 --- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java +++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java @@ -67,7 +67,7 @@ public class UnsafeShuffleWriterSuite { File mergedOutputFile; File tempDir; long[] partitionSizesInMergedFile; - final LinkedList spillFilesCreated = new LinkedList(); + final LinkedList spillFilesCreated = new LinkedList<>(); SparkConf conf; final Serializer serializer = new KryoSerializer(new SparkConf()); TaskMetrics taskMetrics; @@ -217,7 +217,7 @@ private void assertSpillFilesWereCleanedUp() { } private List> readRecordsFromFile() throws IOException { - final ArrayList> recordsList = new ArrayList>(); + final ArrayList> recordsList = new ArrayList<>(); long startOffset = 0; for (int i = 0; i < NUM_PARTITITONS; i++) { final long partitionSize = partitionSizesInMergedFile[i]; @@ -286,8 +286,7 @@ public void writeEmptyIterator() throws Exception { @Test public void writeWithoutSpilling() throws Exception { // In this example, each partition should have exactly one record: - final ArrayList> dataToWrite = - new ArrayList>(); + final ArrayList> dataToWrite = new ArrayList<>(); for (int i = 0; i < NUM_PARTITITONS; i++) { dataToWrite.add(new Tuple2(i, i)); } @@ -325,8 +324,7 @@ private void testMergingSpills( conf.set("spark.shuffle.compress", "false"); } final UnsafeShuffleWriter writer = createWriter(transferToEnabled); - final ArrayList> dataToWrite = - new ArrayList>(); + final ArrayList> dataToWrite = new ArrayList<>(); for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) { dataToWrite.add(new Tuple2(i, i)); } @@ -403,7 +401,7 @@ public void mergeSpillsWithFileStreamAndNoCompression() throws Exception { public void writeEnoughDataToTriggerSpill() throws Exception { memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES); final UnsafeShuffleWriter writer = createWriter(false); - final ArrayList> dataToWrite = new ArrayList>(); + final ArrayList> dataToWrite = new ArrayList<>(); final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10]; for (int i = 0; i < 10 + 1; i++) { dataToWrite.add(new Tuple2(i, bigByteArray)); @@ -445,8 +443,7 @@ public void writeEnoughRecordsToTriggerSortBufferExpansionAndSpill() throws Exce @Test public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception { final UnsafeShuffleWriter writer = createWriter(false); - final ArrayList> dataToWrite = - new ArrayList>(); + final ArrayList> dataToWrite = new 
ArrayList<>(); final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)]; new Random(42).nextBytes(bytes); dataToWrite.add(new Tuple2(1, ByteBuffer.wrap(bytes))); @@ -461,7 +458,7 @@ public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception @Test public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception { final UnsafeShuffleWriter writer = createWriter(false); - final ArrayList> dataToWrite = new ArrayList>(); + final ArrayList> dataToWrite = new ArrayList<>(); dataToWrite.add(new Tuple2(1, ByteBuffer.wrap(new byte[1]))); // We should be able to write a record that's right _at_ the max record size final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4]; @@ -498,7 +495,7 @@ public void testPeakMemoryUsed() throws Exception { taskMemoryManager = spy(taskMemoryManager); when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes); final UnsafeShuffleWriter writer = - new UnsafeShuffleWriter( + new UnsafeShuffleWriter<>( blockManager, shuffleBlockResolver, taskMemoryManager, diff --git a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java index 61b94b736d38e..9aab2265c9892 100644 --- a/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java +++ b/core/src/test/java/org/apache/spark/unsafe/map/AbstractBytesToBytesMapSuite.java @@ -66,7 +66,7 @@ public abstract class AbstractBytesToBytesMapSuite { private TaskMemoryManager taskMemoryManager; private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes - final LinkedList spillFilesCreated = new LinkedList(); + final LinkedList spillFilesCreated = new LinkedList<>(); File tempDir; @Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager; @@ -397,7 +397,7 @@ public void randomizedStressTest() { final int size = 65536; // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays // into ByteBuffers in order to use them as keys here. - final Map expected = new HashMap(); + final Map expected = new HashMap<>(); final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES); try { // Fill the map to 90% full so that we can trigger probing @@ -453,7 +453,7 @@ public void randomizedTestWithRecordsLargerThanPageSize() { final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes); // Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays // into ByteBuffers in order to use them as keys here. - final Map expected = new HashMap(); + final Map expected = new HashMap<>(); try { for (int i = 0; i < 1000; i++) { final byte[] key = getRandomByteArray(rand.nextInt(128)); diff --git a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java index 45772b6d3c20d..e884b1bc123b8 100644 --- a/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java +++ b/core/src/test/java/org/apache/spark/util/collection/TestTimSort.java @@ -76,7 +76,7 @@ private static int[] createArray(List runs, int length) { * @param length The sum of all run lengths that will be added to runs. 
*/ private static List runsJDKWorstCase(int minRun, int length) { - List runs = new ArrayList(); + List runs = new ArrayList<>(); long runningTotal = 0, Y = minRun + 4, X = minRun; diff --git a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java index 492fe49ba4c4f..b757ddc3b37f9 100644 --- a/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java +++ b/core/src/test/java/org/apache/spark/util/collection/unsafe/sort/UnsafeExternalSorterSuite.java @@ -56,7 +56,7 @@ public class UnsafeExternalSorterSuite { - final LinkedList spillFilesCreated = new LinkedList(); + final LinkedList spillFilesCreated = new LinkedList<>(); final TestMemoryManager memoryManager = new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false")); final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0); diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md index c4d277f9bf958..89fe873851d8c 100644 --- a/docs/sql-programming-guide.md +++ b/docs/sql-programming-guide.md @@ -760,7 +760,7 @@ JavaRDD people = sc.textFile("examples/src/main/resources/people.txt"); String schemaString = "name age"; // Generate the schema based on the string of schema -List fields = new ArrayList(); +List fields = new ArrayList<>(); for (String fieldName: schemaString.split(" ")) { fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true)); } @@ -1935,7 +1935,7 @@ val jdbcDF = sqlContext.read.format("jdbc").options( {% highlight java %} -Map options = new HashMap(); +Map options = new HashMap<>(); options.put("url", "jdbc:postgresql:dbserver"); options.put("dbtable", "schema.tablename"); diff --git a/docs/streaming-programming-guide.md b/docs/streaming-programming-guide.md index e92b01aa7774a..998644f2e23db 100644 --- a/docs/streaming-programming-guide.md +++ b/docs/streaming-programming-guide.md @@ -186,7 +186,7 @@ Next, we want to count these words. JavaPairDStream pairs = words.mapToPair( new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }); JavaPairDStream wordCounts = pairs.reduceByKey( @@ -2095,7 +2095,7 @@ unifiedStream.print()
{% highlight java %} int numStreams = 5; -List> kafkaStreams = new ArrayList>(numStreams); +List> kafkaStreams = new ArrayList<>(numStreams); for (int i = 0; i < numStreams; i++) { kafkaStreams.add(KafkaUtils.createStream(...)); } diff --git a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java index 1a6caa8cf8647..8abc03e73d965 100644 --- a/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java +++ b/examples/src/main/java/org/apache/spark/examples/JavaLogQuery.java @@ -108,7 +108,7 @@ public static void main(String[] args) { JavaPairRDD, Stats> extracted = dataSet.mapToPair(new PairFunction, Stats>() { @Override public Tuple2, Stats> call(String s) { - return new Tuple2, Stats>(extractKey(s), extractStats(s)); + return new Tuple2<>(extractKey(s), extractStats(s)); } }); diff --git a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java index 635fb6a373c47..c3ef93c5b6325 100644 --- a/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java +++ b/examples/src/main/java/org/apache/spark/examples/JavaPageRank.java @@ -88,7 +88,7 @@ public static void main(String[] args) throws Exception { @Override public Tuple2 call(String s) { String[] parts = SPACES.split(s); - return new Tuple2(parts[0], parts[1]); + return new Tuple2<>(parts[0], parts[1]); } }).distinct().groupByKey().cache(); diff --git a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java index af874887445b1..04a57a6bfb58b 100644 --- a/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java +++ b/examples/src/main/java/org/apache/spark/examples/JavaSparkPi.java @@ -38,7 +38,7 @@ public static void main(String[] args) throws Exception { int slices = (args.length == 1) ? 
Integer.parseInt(args[0]) : 2; int n = 100000 * slices; - List l = new ArrayList(n); + List l = new ArrayList<>(n); for (int i = 0; i < n; i++) { l.add(i); } diff --git a/examples/src/main/java/org/apache/spark/examples/JavaTC.java b/examples/src/main/java/org/apache/spark/examples/JavaTC.java index 2563fcdd234bb..ca10384212da2 100644 --- a/examples/src/main/java/org/apache/spark/examples/JavaTC.java +++ b/examples/src/main/java/org/apache/spark/examples/JavaTC.java @@ -41,16 +41,16 @@ public final class JavaTC { private static final Random rand = new Random(42); static List> generateGraph() { - Set> edges = new HashSet>(numEdges); + Set> edges = new HashSet<>(numEdges); while (edges.size() < numEdges) { int from = rand.nextInt(numVertices); int to = rand.nextInt(numVertices); - Tuple2 e = new Tuple2(from, to); + Tuple2 e = new Tuple2<>(from, to); if (from != to) { edges.add(e); } } - return new ArrayList>(edges); + return new ArrayList<>(edges); } static class ProjectFn implements PairFunction>, @@ -59,7 +59,7 @@ static class ProjectFn implements PairFunction call(Tuple2> triple) { - return new Tuple2(triple._2()._2(), triple._2()._1()); + return new Tuple2<>(triple._2()._2(), triple._2()._1()); } } @@ -79,7 +79,7 @@ public static void main(String[] args) { new PairFunction, Integer, Integer>() { @Override public Tuple2 call(Tuple2 e) { - return new Tuple2(e._2(), e._1()); + return new Tuple2<>(e._2(), e._1()); } }); diff --git a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java index d746a3d2b6773..84dbea5caa135 100644 --- a/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/JavaWordCount.java @@ -55,7 +55,7 @@ public Iterator call(String s) { JavaPairRDD ones = words.mapToPair(new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }); diff --git a/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java b/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java index 2898accec61b0..c1f00dde0e60a 100644 --- a/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java +++ b/examples/src/main/java/org/apache/spark/examples/ml/JavaElementwiseProductExample.java @@ -52,7 +52,7 @@ public static void main(String[] args) { RowFactory.create("b", Vectors.dense(4.0, 5.0, 6.0)) )); - List fields = new ArrayList(2); + List fields = new ArrayList<>(2); fields.add(DataTypes.createStructField("id", DataTypes.StringType, false)); fields.add(DataTypes.createStructField("vector", new VectorUDT(), false)); diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java index 5839b0cf8a8f8..66387b9df51c7 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeClassificationExample.java @@ -54,7 +54,7 @@ public static void main(String[] args) { // Set parameters. // Empty categoricalFeaturesInfo indicates all features are continuous. 
Integer numClasses = 2; - Map categoricalFeaturesInfo = new HashMap(); + Map categoricalFeaturesInfo = new HashMap<>(); String impurity = "gini"; Integer maxDepth = 5; Integer maxBins = 32; @@ -68,7 +68,7 @@ public static void main(String[] args) { testData.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); Double testErr = diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java index ccde578249f7c..904e7f7e9505e 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaDecisionTreeRegressionExample.java @@ -54,7 +54,7 @@ public static void main(String[] args) { // Set parameters. // Empty categoricalFeaturesInfo indicates all features are continuous. - Map categoricalFeaturesInfo = new HashMap(); + Map categoricalFeaturesInfo = new HashMap<>(); String impurity = "variance"; Integer maxDepth = 5; Integer maxBins = 32; @@ -68,7 +68,7 @@ public static void main(String[] args) { testData.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); Double testMSE = diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java index 0c2e4c928bb2c..213949e525dc2 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingClassificationExample.java @@ -58,7 +58,7 @@ public static void main(String[] args) { boostingStrategy.getTreeStrategy().setNumClasses(2); boostingStrategy.getTreeStrategy().setMaxDepth(5); // Empty categoricalFeaturesInfo indicates all features are continuous. - Map categoricalFeaturesInfo = new HashMap(); + Map categoricalFeaturesInfo = new HashMap<>(); boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo); final GradientBoostedTreesModel model = @@ -69,7 +69,7 @@ public static void main(String[] args) { testData.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); Double testErr = diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java index c1bc2098dcd7e..78db442dbc99d 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaGradientBoostingRegressionExample.java @@ -57,7 +57,7 @@ public static void main(String[] args) { boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice. boostingStrategy.getTreeStrategy().setMaxDepth(5); // Empty categoricalFeaturesInfo indicates all features are continuous. 
- Map categoricalFeaturesInfo = new HashMap(); + Map categoricalFeaturesInfo = new HashMap<>(); boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo); final GradientBoostedTreesModel model = @@ -68,7 +68,7 @@ public static void main(String[] args) { testData.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); Double testMSE = diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java index e632e35110920..0e15f755083bf 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaIsotonicRegressionExample.java @@ -62,7 +62,7 @@ public Tuple3 call(String line) { @Override public Tuple2 call(Tuple3 point) { Double predictedLabel = model.predict(point._2()); - return new Tuple2(predictedLabel, point._1()); + return new Tuple2<>(predictedLabel, point._1()); } } ); diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java index 3e50118c0d9ec..9ca9a7847c463 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaLinearRegressionWithSGDExample.java @@ -70,7 +70,7 @@ public LabeledPoint call(String line) { new Function>() { public Tuple2 call(LabeledPoint point) { double prediction = model.predict(point.features()); - return new Tuple2(prediction, point.label()); + return new Tuple2<>(prediction, point.label()); } } ); diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java index 478e615123e6d..2b17dbb96365e 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaNaiveBayesExample.java @@ -46,7 +46,7 @@ public static void main(String[] args) { test.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); double accuracy = predictionAndLabel.filter(new Function, Boolean>() { diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java index faf76a9540e77..a42c29f52fb65 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPCAExample.java @@ -42,7 +42,7 @@ public static void main(String[] args) { // $example on$ double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}}; - LinkedList rowsList = new LinkedList(); + LinkedList rowsList = new LinkedList<>(); for (int i = 0; i < array.length; i++) { Vector currentRow = Vectors.dense(array[i]); rowsList.add(currentRow); diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java 
b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java index f4c9d8a35dc96..24af5d0180ce4 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestClassificationExample.java @@ -50,7 +50,7 @@ public static void main(String[] args) { // Train a RandomForest model. // Empty categoricalFeaturesInfo indicates all features are continuous. Integer numClasses = 2; - HashMap categoricalFeaturesInfo = new HashMap(); + HashMap categoricalFeaturesInfo = new HashMap<>(); Integer numTrees = 3; // Use more in practice. String featureSubsetStrategy = "auto"; // Let the algorithm choose. String impurity = "gini"; @@ -67,7 +67,7 @@ public static void main(String[] args) { testData.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); Double testErr = diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java index c71125ce414a0..afa9045878db3 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRandomForestRegressionExample.java @@ -51,7 +51,7 @@ public static void main(String[] args) { // Set parameters. // Empty categoricalFeaturesInfo indicates all features are continuous. - Map categoricalFeaturesInfo = new HashMap(); + Map categoricalFeaturesInfo = new HashMap<>(); Integer numTrees = 3; // Use more in practice. String featureSubsetStrategy = "auto"; // Let the algorithm choose. 
String impurity = "variance"; @@ -67,7 +67,7 @@ public static void main(String[] args) { testData.mapToPair(new PairFunction() { @Override public Tuple2 call(LabeledPoint p) { - return new Tuple2(model.predict(p.features()), p.label()); + return new Tuple2<>(model.predict(p.features()), p.label()); } }); Double testMSE = diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java index 5e643420ada15..f69aa4b75a56c 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaRecommendationExample.java @@ -64,8 +64,7 @@ public Tuple2 call(Rating r) { model.predict(JavaRDD.toRDD(userProducts)).toJavaRDD().map( new Function, Double>>() { public Tuple2, Double> call(Rating r){ - return new Tuple2, Double>( - new Tuple2(r.user(), r.product()), r.rating()); + return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating()); } } )); @@ -73,8 +72,7 @@ public Tuple2, Double> call(Rating r){ JavaPairRDD.fromJavaRDD(ratings.map( new Function, Double>>() { public Tuple2, Double> call(Rating r){ - return new Tuple2, Double>( - new Tuple2(r.user(), r.product()), r.rating()); + return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating()); } } )).join(predictions).values(); diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java index b417da8f85cf5..3730e60f68803 100644 --- a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java +++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSVDExample.java @@ -44,7 +44,7 @@ public static void main(String[] args) { // $example on$ double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}}; - LinkedList rowsList = new LinkedList(); + LinkedList rowsList = new LinkedList<>(); for (int i = 0; i < array.length; i++) { Vector currentRow = Vectors.dense(array[i]); rowsList.add(currentRow); diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java index 7bb70d0c067df..7884b8cdfff84 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaActorWordCount.java @@ -129,7 +129,7 @@ public Iterator call(String s) { }).mapToPair(new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }).reduceByKey(new Function2() { @Override diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java index 3d668adcf815f..5de56340c6d22 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaCustomReceiver.java @@ -82,7 +82,7 @@ public Iterator call(String x) { JavaPairDStream wordCounts = words.mapToPair( new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }).reduceByKey(new Function2() { @Override diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java 
b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java index 5107500a127c5..bfbad91e4fdfa 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaDirectKafkaWordCount.java @@ -63,8 +63,8 @@ public static void main(String[] args) { SparkConf sparkConf = new SparkConf().setAppName("JavaDirectKafkaWordCount"); JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(2)); - HashSet topicsSet = new HashSet(Arrays.asList(topics.split(","))); - HashMap kafkaParams = new HashMap(); + HashSet topicsSet = new HashSet<>(Arrays.asList(topics.split(","))); + HashMap kafkaParams = new HashMap<>(); kafkaParams.put("metadata.broker.list", brokers); // Create direct kafka stream with brokers and topics @@ -95,7 +95,7 @@ public Iterator call(String x) { new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }).reduceByKey( new Function2() { diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java index 0df4cb40a9a76..655da6840cc57 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaKafkaWordCount.java @@ -69,7 +69,7 @@ public static void main(String[] args) { JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000)); int numThreads = Integer.parseInt(args[3]); - Map topicMap = new HashMap(); + Map topicMap = new HashMap<>(); String[] topics = args[2].split(","); for (String topic: topics) { topicMap.put(topic, numThreads); @@ -96,7 +96,7 @@ public Iterator call(String x) { new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }).reduceByKey(new Function2() { @Override diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java index b82b319acb735..5761da684b467 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaNetworkWordCount.java @@ -76,7 +76,7 @@ public Iterator call(String x) { new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }).reduceByKey(new Function2() { @Override diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java index 4ce8437f82705..426eaa5f0adea 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaQueueStream.java @@ -50,7 +50,7 @@ public static void main(String[] args) throws Exception { // Create the queue through which RDDs can be pushed to // a QueueInputDStream - Queue> rddQueue = new LinkedList>(); + Queue> rddQueue = new LinkedList<>(); // Create and push some RDDs into the queue List list = Lists.newArrayList(); @@ -68,7 +68,7 @@ public static void main(String[] args) throws Exception { new PairFunction() { @Override public Tuple2 call(Integer i) { - return new Tuple2(i % 10, 1); + return new 
Tuple2<>(i % 10, 1); } }); JavaPairDStream reducedStream = mappedStream.reduceByKey( diff --git a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java index f9929fc86dc7f..a597ecbc5bcb3 100644 --- a/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java +++ b/examples/src/main/java/org/apache/spark/examples/streaming/JavaRecoverableNetworkWordCount.java @@ -142,7 +142,7 @@ public Iterator call(String x) { new PairFunction() { @Override public Tuple2 call(String s) { - return new Tuple2(s, 1); + return new Tuple2<>(s, 1); } }).reduceByKey(new Function2() { @Override diff --git a/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java b/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java index 64e044aa8e4a4..5dc825dfdc911 100644 --- a/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java +++ b/extras/kinesis-asl/src/main/java/org/apache/spark/examples/streaming/JavaKinesisWordCountASL.java @@ -136,7 +136,7 @@ public static void main(String[] args) { JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval); // Create the Kinesis DStreams - List> streamsList = new ArrayList>(numStreams); + List> streamsList = new ArrayList<>(numStreams); for (int i = 0; i < numStreams; i++) { streamsList.add( KinesisUtils.createStream(jssc, kinesisAppName, streamName, endpointUrl, regionName, diff --git a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java index c7ab51357c1fb..46410327a5d72 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java +++ b/launcher/src/main/java/org/apache/spark/launcher/AbstractCommandBuilder.java @@ -58,12 +58,12 @@ abstract class AbstractCommandBuilder { private Map effectiveConfig; public AbstractCommandBuilder() { - this.appArgs = new ArrayList(); - this.childEnv = new HashMap(); - this.conf = new HashMap(); - this.files = new ArrayList(); - this.jars = new ArrayList(); - this.pyFiles = new ArrayList(); + this.appArgs = new ArrayList<>(); + this.childEnv = new HashMap<>(); + this.conf = new HashMap<>(); + this.files = new ArrayList<>(); + this.jars = new ArrayList<>(); + this.pyFiles = new ArrayList<>(); } /** @@ -87,7 +87,7 @@ public AbstractCommandBuilder() { * class. 
*/ List buildJavaCommand(String extraClassPath) throws IOException { - List cmd = new ArrayList(); + List cmd = new ArrayList<>(); String envJavaHome; if (javaHome != null) { @@ -134,7 +134,7 @@ void addOptionString(List cmd, String options) { List buildClassPath(String appClassPath) throws IOException { String sparkHome = getSparkHome(); - List cp = new ArrayList(); + List cp = new ArrayList<>(); addToClassPath(cp, getenv("SPARK_CLASSPATH")); addToClassPath(cp, appClassPath); diff --git a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java index e328c8a341c28..7942d7372faff 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java +++ b/launcher/src/main/java/org/apache/spark/launcher/CommandBuilderUtils.java @@ -147,7 +147,7 @@ static void mergeEnvPathList(Map userEnv, String envKey, String * Output: [ "ab cd", "efgh", "i \" j" ] */ static List parseOptionString(String s) { - List opts = new ArrayList(); + List opts = new ArrayList<>(); StringBuilder opt = new StringBuilder(); boolean inOpt = false; boolean inSingleQuote = false; diff --git a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java index 414ffc2c84e52..69fbf4387bdfb 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java +++ b/launcher/src/main/java/org/apache/spark/launcher/LauncherServer.java @@ -129,7 +129,7 @@ private LauncherServer() throws IOException { server.setReuseAddress(true); server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); - this.clients = new ArrayList(); + this.clients = new ArrayList<>(); this.threadIds = new AtomicLong(); this.factory = new NamedThreadFactory(THREAD_NAME_FMT); this.pending = new ConcurrentHashMap<>(); diff --git a/launcher/src/main/java/org/apache/spark/launcher/Main.java b/launcher/src/main/java/org/apache/spark/launcher/Main.java index e751e948e3561..1e34bb8c73279 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/Main.java +++ b/launcher/src/main/java/org/apache/spark/launcher/Main.java @@ -50,7 +50,7 @@ class Main { public static void main(String[] argsArray) throws Exception { checkArgument(argsArray.length > 0, "Not enough arguments: missing class name."); - List args = new ArrayList(Arrays.asList(argsArray)); + List args = new ArrayList<>(Arrays.asList(argsArray)); String className = args.remove(0); boolean printLaunchCommand = !isEmpty(System.getenv("SPARK_PRINT_LAUNCH_COMMAND")); @@ -70,7 +70,7 @@ public static void main(String[] argsArray) throws Exception { // Ignore parsing exceptions. 
} - List help = new ArrayList(); + List help = new ArrayList<>(); if (parser.className != null) { help.add(parser.CLASS); help.add(parser.className); @@ -82,7 +82,7 @@ public static void main(String[] argsArray) throws Exception { builder = new SparkClassCommandBuilder(className, args); } - Map env = new HashMap(); + Map env = new HashMap<>(); List cmd = builder.buildCommand(env); if (printLaunchCommand) { System.err.println("Spark Command: " + join(" ", cmd)); @@ -130,7 +130,7 @@ private static List prepareBashCommand(List cmd, Map newCmd = new ArrayList(); + List newCmd = new ArrayList<>(); newCmd.add("env"); for (Map.Entry e : childEnv.entrySet()) { diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java index e575fd33080a2..40187236f2f2d 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java +++ b/launcher/src/main/java/org/apache/spark/launcher/SparkClassCommandBuilder.java @@ -44,7 +44,7 @@ class SparkClassCommandBuilder extends AbstractCommandBuilder { @Override public List buildCommand(Map env) throws IOException { - List javaOptsKeys = new ArrayList(); + List javaOptsKeys = new ArrayList<>(); String memKey = null; String extraClassPath = null; diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java index 20e6003a00c19..a54215990137e 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java +++ b/launcher/src/main/java/org/apache/spark/launcher/SparkLauncher.java @@ -75,7 +75,7 @@ public class SparkLauncher { /** Used internally to create unique logger names. */ private static final AtomicInteger COUNTER = new AtomicInteger(); - static final Map launcherConfig = new HashMap(); + static final Map launcherConfig = new HashMap<>(); /** * Set a configuration value for the launcher library. These config values do not affect the @@ -428,7 +428,7 @@ public SparkAppHandle startApplication(SparkAppHandle.Listener... listeners) thr } private ProcessBuilder createBuilder() { - List cmd = new ArrayList(); + List cmd = new ArrayList<>(); String script = isWindows() ? "spark-submit.cmd" : "spark-submit"; cmd.add(join(File.separator, builder.getSparkHome(), "bin", script)); cmd.addAll(builder.buildSparkSubmitArgs()); @@ -437,7 +437,7 @@ private ProcessBuilder createBuilder() { // preserved, otherwise the batch interpreter will mess up the arguments. Batch scripts are // weird. if (isWindows()) { - List winCmd = new ArrayList(); + List winCmd = new ArrayList<>(); for (String arg : cmd) { winCmd.add(quoteForBatchScript(arg)); } diff --git a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java index 269c89c310550..b2dd6ac4c3982 100644 --- a/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java +++ b/launcher/src/main/java/org/apache/spark/launcher/SparkSubmitCommandBuilder.java @@ -67,7 +67,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder { * command line parsing works. This maps the class name to the resource to use when calling * spark-submit. 
*/ - private static final Map specialClasses = new HashMap(); + private static final Map specialClasses = new HashMap<>(); static { specialClasses.put("org.apache.spark.repl.Main", "spark-shell"); specialClasses.put("org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver", @@ -87,12 +87,12 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder { private boolean allowsMixedArguments; SparkSubmitCommandBuilder() { - this.sparkArgs = new ArrayList(); + this.sparkArgs = new ArrayList<>(); this.printInfo = false; } SparkSubmitCommandBuilder(List args) { - this.sparkArgs = new ArrayList(); + this.sparkArgs = new ArrayList<>(); List submitArgs = args; if (args.size() > 0 && args.get(0).equals(PYSPARK_SHELL)) { this.allowsMixedArguments = true; @@ -123,7 +123,7 @@ public List buildCommand(Map env) throws IOException { } List buildSparkSubmitArgs() { - List args = new ArrayList(); + List args = new ArrayList<>(); SparkSubmitOptionParser parser = new SparkSubmitOptionParser(); if (verbose) { @@ -244,7 +244,7 @@ private List buildPySparkShellCommand(Map env) throws IO // The executable is the PYSPARK_DRIVER_PYTHON env variable set by the pyspark script, // followed by PYSPARK_DRIVER_PYTHON_OPTS. - List pyargs = new ArrayList(); + List pyargs = new ArrayList<>(); pyargs.add(firstNonEmpty(System.getenv("PYSPARK_DRIVER_PYTHON"), "python")); String pyOpts = System.getenv("PYSPARK_DRIVER_PYTHON_OPTS"); if (!isEmpty(pyOpts)) { @@ -270,7 +270,7 @@ private List buildSparkRCommand(Map env) throws IOExcept env.put("R_PROFILE_USER", join(File.separator, sparkHome, "R", "lib", "SparkR", "profile", "shell.R")); - List args = new ArrayList(); + List args = new ArrayList<>(); args.add(firstNonEmpty(System.getenv("SPARKR_DRIVER_R"), "R")); return args; } diff --git a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java index 6aad47adbcc82..d36731840b1a1 100644 --- a/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java +++ b/launcher/src/test/java/org/apache/spark/launcher/SparkSubmitCommandBuilderSuite.java @@ -73,7 +73,7 @@ public void testCliParser() throws Exception { "spark.randomOption=foo", parser.CONF, SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH + "=/driverLibPath"); - Map env = new HashMap(); + Map env = new HashMap<>(); List cmd = buildCommand(sparkSubmitArgs, env); assertTrue(findInStringList(env.get(CommandBuilderUtils.getLibPathEnvName()), @@ -125,7 +125,7 @@ public void testPySparkLauncher() throws Exception { "--master=foo", "--deploy-mode=bar"); - Map env = new HashMap(); + Map env = new HashMap<>(); List cmd = buildCommand(sparkSubmitArgs, env); assertEquals("python", cmd.get(cmd.size() - 1)); assertEquals( @@ -142,7 +142,7 @@ public void testPySparkFallback() throws Exception { "script.py", "arg1"); - Map env = new HashMap(); + Map env = new HashMap<>(); List cmd = buildCommand(sparkSubmitArgs, env); assertEquals("foo", findArgValue(cmd, "--master")); @@ -178,7 +178,7 @@ private void testCmdBuilder(boolean isDriver, boolean useDefaultPropertyFile) th + "/launcher/src/test/resources"); } - Map env = new HashMap(); + Map env = new HashMap<>(); List cmd = launcher.buildCommand(env); // Checks below are different for driver and non-driver mode. 
@@ -258,7 +258,7 @@ private boolean contains(String needle, String[] haystack) { } private Map parseConf(List cmd, SparkSubmitOptionParser parser) { - Map conf = new HashMap(); + Map conf = new HashMap<>(); for (int i = 0; i < cmd.size(); i++) { if (cmd.get(i).equals(parser.CONF)) { String[] val = cmd.get(i + 1).split("=", 2); diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java index 60f25e5cce437..40b9c35adc431 100644 --- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java +++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaDecisionTreeClassifierSuite.java @@ -56,7 +56,7 @@ public void runDT() { JavaRDD data = sc.parallelize( LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache(); - Map categoricalFeatures = new HashMap(); + Map categoricalFeatures = new HashMap<>(); DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2); // This tests setters. Training with various options is tested in Scala. diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java index 3c69467fa119e..59b6fba7a928a 100644 --- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java +++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaGBTClassifierSuite.java @@ -56,7 +56,7 @@ public void runDT() { JavaRDD data = sc.parallelize( LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache(); - Map categoricalFeatures = new HashMap(); + Map categoricalFeatures = new HashMap<>(); DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2); // This tests setters. Training with various options is tested in Scala. diff --git a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java index a66a1e12927be..5485fcbf01bda 100644 --- a/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java +++ b/mllib/src/test/java/org/apache/spark/ml/classification/JavaRandomForestClassifierSuite.java @@ -57,7 +57,7 @@ public void runDT() { JavaRDD data = sc.parallelize( LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache(); - Map categoricalFeatures = new HashMap(); + Map categoricalFeatures = new HashMap<>(); DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2); // This tests setters. Training with various options is tested in Scala. diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java index ebe800e749e05..d5c9d120c592c 100644 --- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java +++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaDecisionTreeRegressorSuite.java @@ -56,7 +56,7 @@ public void runDT() { JavaRDD data = sc.parallelize( LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache(); - Map categoricalFeatures = new HashMap(); + Map categoricalFeatures = new HashMap<>(); DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0); // This tests setters. 
Training with various options is tested in Scala. diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java index fc8c13db07e6f..38d15dc2b7c78 100644 --- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java +++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaGBTRegressorSuite.java @@ -56,7 +56,7 @@ public void runDT() { JavaRDD data = sc.parallelize( LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache(); - Map categoricalFeatures = new HashMap(); + Map categoricalFeatures = new HashMap<>(); DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0); GBTRegressor rf = new GBTRegressor() diff --git a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java index a00ce5e249c34..31be8880c25e1 100644 --- a/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java +++ b/mllib/src/test/java/org/apache/spark/ml/regression/JavaRandomForestRegressorSuite.java @@ -57,7 +57,7 @@ public void runDT() { JavaRDD data = sc.parallelize( LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache(); - Map categoricalFeatures = new HashMap(); + Map categoricalFeatures = new HashMap<>(); DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0); // This tests setters. Training with various options is tested in Scala. diff --git a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java index 225a216270b3b..db19b309f65ae 100644 --- a/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java +++ b/mllib/src/test/java/org/apache/spark/mllib/clustering/JavaLDASuite.java @@ -45,9 +45,9 @@ public class JavaLDASuite implements Serializable { @Before public void setUp() { sc = new JavaSparkContext("local", "JavaLDA"); - ArrayList> tinyCorpus = new ArrayList>(); + ArrayList> tinyCorpus = new ArrayList<>(); for (int i = 0; i < LDASuite.tinyCorpus().length; i++) { - tinyCorpus.add(new Tuple2((Long)LDASuite.tinyCorpus()[i]._1(), + tinyCorpus.add(new Tuple2<>((Long)LDASuite.tinyCorpus()[i]._1(), LDASuite.tinyCorpus()[i]._2())); } JavaRDD> tmpCorpus = sc.parallelize(tinyCorpus, 2); @@ -189,8 +189,8 @@ public void localLdaMethods() { double logPerplexity = toyModel.logPerplexity(pairedDocs); // check: logLikelihood. 
- ArrayList> docsSingleWord = new ArrayList>(); - docsSingleWord.add(new Tuple2(0L, Vectors.dense(1.0, 0.0, 0.0))); + ArrayList> docsSingleWord = new ArrayList<>(); + docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0))); JavaPairRDD single = JavaPairRDD.fromJavaRDD(sc.parallelize(docsSingleWord)); double logLikelihood = toyModel.logLikelihood(single); } diff --git a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java index 9925aae441af9..8dd29061daaad 100644 --- a/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java +++ b/mllib/src/test/java/org/apache/spark/mllib/tree/JavaDecisionTreeSuite.java @@ -64,7 +64,7 @@ int validatePrediction(List validationData, DecisionTreeModel mode public void runDTUsingConstructor() { List arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList(); JavaRDD rdd = sc.parallelize(arr); - HashMap categoricalFeaturesInfo = new HashMap(); + HashMap categoricalFeaturesInfo = new HashMap<>(); categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories int maxDepth = 4; @@ -84,7 +84,7 @@ public void runDTUsingConstructor() { public void runDTUsingStaticMethods() { List arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList(); JavaRDD rdd = sc.parallelize(arr); - HashMap categoricalFeaturesInfo = new HashMap(); + HashMap categoricalFeaturesInfo = new HashMap<>(); categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories int maxDepth = 4; diff --git a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java index 17659d7d960b0..24adeadf95675 100644 --- a/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java +++ b/sql/catalyst/src/main/java/org/apache/spark/sql/types/DataTypes.java @@ -201,7 +201,7 @@ public static StructType createStructType(StructField[] fields) { if (fields == null) { throw new IllegalArgumentException("fields should not be null."); } - Set distinctNames = new HashSet(); + Set distinctNames = new HashSet<>(); for (StructField field : fields) { if (field == null) { throw new IllegalArgumentException( diff --git a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java index 6bcd155ccdc49..5c257bc260873 100644 --- a/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java +++ b/sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/SpecificParquetRecordReaderBase.java @@ -149,7 +149,7 @@ public void initialize(InputSplit inputSplit, TaskAttemptContext taskAttemptCont * by MapReduce. 
*/ public static List listDirectory(File path) throws IOException { - List result = new ArrayList(); + List result = new ArrayList<>(); if (path.isDirectory()) { for (File f: path.listFiles()) { result.addAll(listDirectory(f)); diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java index 640efcc737eaa..51f987fda9de7 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaApplySchemaSuite.java @@ -111,7 +111,7 @@ public Row call(Person person) throws Exception { df.registerTempTable("people"); Row[] actual = sqlContext.sql("SELECT * FROM people").collect(); - List expected = new ArrayList(2); + List expected = new ArrayList<>(2); expected.add(RowFactory.create("Michael", 29)); expected.add(RowFactory.create("Yin", 28)); diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java index 9b624f318c6f8..b054b1095b2b0 100644 --- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java +++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDatasetSuite.java @@ -67,7 +67,7 @@ public void tearDown() { } private Tuple2 tuple2(T1 t1, T2 t2) { - return new Tuple2(t1, t2); + return new Tuple2<>(t1, t2); } @Test diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java index 5a167edd89592..ae0c097c362ab 100644 --- a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java +++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleAvg.java @@ -42,14 +42,14 @@ public class MyDoubleAvg extends UserDefinedAggregateFunction { private DataType _returnDataType; public MyDoubleAvg() { - List inputFields = new ArrayList(); + List inputFields = new ArrayList<>(); inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true)); _inputDataType = DataTypes.createStructType(inputFields); // The buffer has two values, bufferSum for storing the current sum and // bufferCount for storing the number of non-null input values that have been contribuetd // to the current sum. 
- List bufferFields = new ArrayList(); + List bufferFields = new ArrayList<>(); bufferFields.add(DataTypes.createStructField("bufferSum", DataTypes.DoubleType, true)); bufferFields.add(DataTypes.createStructField("bufferCount", DataTypes.LongType, true)); _bufferSchema = DataTypes.createStructType(bufferFields); diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java index c3b7768e71bf8..d17fb3e5194f3 100644 --- a/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java +++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/aggregate/MyDoubleSum.java @@ -41,11 +41,11 @@ public class MyDoubleSum extends UserDefinedAggregateFunction { private DataType _returnDataType; public MyDoubleSum() { - List inputFields = new ArrayList(); + List inputFields = new ArrayList<>(); inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true)); _inputDataType = DataTypes.createStructType(inputFields); - List bufferFields = new ArrayList(); + List bufferFields = new ArrayList<>(); bufferFields.add(DataTypes.createStructField("bufferDouble", DataTypes.DoubleType, true)); _bufferSchema = DataTypes.createStructType(bufferFields); diff --git a/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java b/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java index 4ef1f276d1bbb..fc24600a1e4a7 100644 --- a/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java +++ b/sql/hive/src/test/java/org/apache/spark/sql/hive/test/Complex.java @@ -50,7 +50,7 @@ public class Complex implements org.apache.thrift.TBase, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + private static final Map, SchemeFactory> schemes = new HashMap<>(); static { schemes.put(StandardScheme.class, new ComplexStandardSchemeFactory()); schemes.put(TupleScheme.class, new ComplexTupleSchemeFactory()); @@ -72,7 +72,7 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { LINT_STRING((short)5, "lintString"), M_STRING_STRING((short)6, "mStringString"); - private static final Map byName = new HashMap(); + private static final Map byName = new HashMap<>(); static { for (_Fields field : EnumSet.allOf(_Fields.class)) { @@ -141,7 +141,7 @@ public String getFieldName() { private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { - Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<>(_Fields.class); tmpMap.put(_Fields.AINT, new org.apache.thrift.meta_data.FieldMetaData("aint", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); tmpMap.put(_Fields.A_STRING, new org.apache.thrift.meta_data.FieldMetaData("aString", org.apache.thrift.TFieldRequirementType.DEFAULT, @@ -194,28 +194,28 @@ public Complex(Complex other) { this.aString = other.aString; } if (other.isSetLint()) { - List __this__lint = new ArrayList(); + List __this__lint = new ArrayList<>(); for (Integer other_element : other.lint) { __this__lint.add(other_element); } this.lint = __this__lint; } if (other.isSetLString()) { - List __this__lString = new ArrayList(); + List __this__lString = new ArrayList<>(); for (String other_element : other.lString) { 
__this__lString.add(other_element); } this.lString = __this__lString; } if (other.isSetLintString()) { - List __this__lintString = new ArrayList(); + List __this__lintString = new ArrayList<>(); for (IntString other_element : other.lintString) { __this__lintString.add(new IntString(other_element)); } this.lintString = __this__lintString; } if (other.isSetMStringString()) { - Map __this__mStringString = new HashMap(); + Map __this__mStringString = new HashMap<>(); for (Map.Entry other_element : other.mStringString.entrySet()) { String other_element_key = other_element.getKey(); @@ -339,7 +339,7 @@ public java.util.Iterator getLStringIterator() { public void addToLString(String elem) { if (this.lString == null) { - this.lString = new ArrayList(); + this.lString = new ArrayList<>(); } this.lString.add(elem); } @@ -411,7 +411,7 @@ public int getMStringStringSize() { public void putToMStringString(String key, String val) { if (this.mStringString == null) { - this.mStringString = new HashMap(); + this.mStringString = new HashMap<>(); } this.mStringString.put(key, val); } @@ -876,7 +876,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Complex struct) thr if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); - struct.lint = new ArrayList(_list0.size); + struct.lint = new ArrayList<>(_list0.size); for (int _i1 = 0; _i1 < _list0.size; ++_i1) { int _elem2; // required @@ -894,7 +894,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Complex struct) thr if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list3 = iprot.readListBegin(); - struct.lString = new ArrayList(_list3.size); + struct.lString = new ArrayList<>(_list3.size); for (int _i4 = 0; _i4 < _list3.size; ++_i4) { String _elem5; // required @@ -912,7 +912,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Complex struct) thr if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { org.apache.thrift.protocol.TList _list6 = iprot.readListBegin(); - struct.lintString = new ArrayList(_list6.size); + struct.lintString = new ArrayList<>(_list6.size); for (int _i7 = 0; _i7 < _list6.size; ++_i7) { IntString _elem8; // required @@ -1114,7 +1114,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro if (incoming.get(2)) { { org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32()); - struct.lint = new ArrayList(_list21.size); + struct.lint = new ArrayList<>(_list21.size); for (int _i22 = 0; _i22 < _list21.size; ++_i22) { int _elem23; // required @@ -1127,7 +1127,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro if (incoming.get(3)) { { org.apache.thrift.protocol.TList _list24 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.lString = new ArrayList(_list24.size); + struct.lString = new ArrayList<>(_list24.size); for (int _i25 = 0; _i25 < _list24.size; ++_i25) { String _elem26; // required @@ -1140,7 +1140,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro if (incoming.get(4)) { { org.apache.thrift.protocol.TList _list27 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.lintString = new ArrayList(_list27.size); + struct.lintString = new ArrayList<>(_list27.size); for (int _i28 = 0; 
_i28 < _list27.size; ++_i28) { IntString _elem29; // required @@ -1154,7 +1154,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Complex struct) thro if (incoming.get(5)) { { org.apache.thrift.protocol.TMap _map30 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.mStringString = new HashMap(2*_map30.size); + struct.mStringString = new HashMap<>(2*_map30.size); for (int _i31 = 0; _i31 < _map30.size; ++_i31) { String _key32; // required
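
The hunks above all apply the same mechanical refactoring: wherever a constructor call repeats the generic type arguments already present in the variable's declared type, the right-hand side is rewritten to use the Java 7 diamond operator (<>). A minimal sketch of the pattern follows; the class and variable names here are illustrative only and are not taken from the Spark sources.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondOperatorSketch {
  public static void main(String[] args) {
    // Pre-Java 7 style: the type arguments are spelled out on both sides.
    Map<String, String> env = new HashMap<String, String>();
    env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");

    // Java 7+ style used throughout this patch: the empty diamond lets the
    // compiler infer the type arguments from the declared type on the left.
    Map<String, String> envDiamond = new HashMap<>();
    envDiamond.putAll(env);

    // Nested generics benefit the most from the rewrite.
    List<List<Integer>> nested = new ArrayList<>();
    List<Integer> inner = new ArrayList<>();
    inner.add(42);
    nested.add(inner);

    System.out.println(envDiamond);
    System.out.println(nested);
  }
}

Note that until Java 9 the diamond operator cannot be used when instantiating an anonymous inner class, so only plain constructor calls (for example new Tuple2<>(s, 1) or new HashMap<>()) are candidates for this rewrite; anonymous function-class instantiations must keep their explicit type arguments.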