[SPARK-13702][CORE][SQL][MLLIB] Use diamond operator for generic instance creation in Java code.

## What changes were proposed in this pull request?

In order to make `docs/examples` (and other related code) simpler, more readable, and more user-friendly, this PR replaces existing code like the following with the `diamond` operator.

```
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
```

Java 7 and higher support the **diamond** operator, which replaces the type arguments required to invoke the constructor of a generic class with an empty set of type parameters (`<>`). Currently, Spark's Java code uses it inconsistently.
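
For illustration, here is a minimal, self-contained sketch of the same rewrite outside of Spark; the class and variable names below are hypothetical and not taken from the patch:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondExample {
  public static void main(String[] args) {
    // Java 6 style: the type arguments are repeated in the constructor call.
    Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();
    verbose.put("old", new ArrayList<Integer>());

    // Java 7+ diamond operator: the compiler infers the constructor's type
    // arguments from the declared type on the left-hand side.
    Map<String, List<Integer>> concise = new HashMap<>();
    List<Integer> counts = new ArrayList<>();
    counts.add(1);
    concise.put("new", counts);

    System.out.println(verbose);  // {old=[]}
    System.out.println(concise);  // {new=[1]}
  }
}
```

Because the diamond operator only changes what the compiler infers at the call site, the compiled bytecode should be unchanged, which is why running the existing test suite is sufficient verification.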

## How was this patch tested?

Manual.
Passes the existing tests.

Author: Dongjoon Hyun <[email protected]>

Closes apache#11541 from dongjoon-hyun/SPARK-13702.
dongjoon-hyun authored and srowen committed Mar 9, 2016
1 parent cbff280 commit c3689bc
Showing 59 changed files with 129 additions and 134 deletions.

@@ -194,8 +194,8 @@ private TransportClient createClient(InetSocketAddress address) throws IOException
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
.option(ChannelOption.ALLOCATOR, pooledAllocator);

final AtomicReference<TransportClient> clientRef = new AtomicReference<TransportClient>();
final AtomicReference<Channel> channelRef = new AtomicReference<Channel>();
final AtomicReference<TransportClient> clientRef = new AtomicReference<>();
final AtomicReference<Channel> channelRef = new AtomicReference<>();

bootstrap.handler(new ChannelInitializer<SocketChannel>() {
@Override
@@ -84,9 +84,9 @@ final class ShuffleExternalSorter extends MemoryConsumer {
* this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
* itself).
*/
private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<MemoryBlock>();
private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();

private final LinkedList<SpillInfo> spills = new LinkedList<SpillInfo>();
private final LinkedList<SpillInfo> spills = new LinkedList<>();

/** Peak memory used by this sorter so far, in bytes. **/
private long peakMemoryUsedBytes;
@@ -29,7 +29,7 @@ public enum TaskSorting {

private final Set<String> alternateNames;
private TaskSorting(String... names) {
alternateNames = new HashSet<String>();
alternateNames = new HashSet<>();
for (String n: names) {
alternateNames.add(n);
}
@@ -88,7 +88,7 @@ public void testSparkArgumentHandling() throws Exception {
@Test
public void testChildProcLauncher() throws Exception {
SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");

SparkLauncher launcher = new SparkLauncher(env)
@@ -67,7 +67,7 @@ public class UnsafeShuffleWriterSuite {
File mergedOutputFile;
File tempDir;
long[] partitionSizesInMergedFile;
final LinkedList<File> spillFilesCreated = new LinkedList<File>();
final LinkedList<File> spillFilesCreated = new LinkedList<>();
SparkConf conf;
final Serializer serializer = new KryoSerializer(new SparkConf());
TaskMetrics taskMetrics;
@@ -217,7 +217,7 @@ private void assertSpillFilesWereCleanedUp() {
}

private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<Tuple2<Object, Object>>();
final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
long startOffset = 0;
for (int i = 0; i < NUM_PARTITITONS; i++) {
final long partitionSize = partitionSizesInMergedFile[i];
@@ -286,8 +286,7 @@ public void writeEmptyIterator() throws Exception {
@Test
public void writeWithoutSpilling() throws Exception {
// In this example, each partition should have exactly one record:
final ArrayList<Product2<Object, Object>> dataToWrite =
new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
for (int i = 0; i < NUM_PARTITITONS; i++) {
dataToWrite.add(new Tuple2<Object, Object>(i, i));
}
@@ -325,8 +324,7 @@ private void testMergingSpills(
conf.set("spark.shuffle.compress", "false");
}
final UnsafeShuffleWriter<Object, Object> writer = createWriter(transferToEnabled);
final ArrayList<Product2<Object, Object>> dataToWrite =
new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) {
dataToWrite.add(new Tuple2<Object, Object>(i, i));
}
@@ -403,7 +401,7 @@ public void mergeSpillsWithFileStreamAndNoCompression() throws Exception {
public void writeEnoughDataToTriggerSpill() throws Exception {
memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
for (int i = 0; i < 10 + 1; i++) {
dataToWrite.add(new Tuple2<Object, Object>(i, bigByteArray));
@@ -445,8 +443,7 @@ public void writeEnoughRecordsToTriggerSortBufferExpansionAndSpill() throws Exception
@Test
public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception {
final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
final ArrayList<Product2<Object, Object>> dataToWrite =
new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)];
new Random(42).nextBytes(bytes);
dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(bytes)));
@@ -461,7 +458,7 @@ public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception
@Test
public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception {
final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(new byte[1])));
// We should be able to write a record that's right _at_ the max record size
final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4];
@@ -498,7 +495,7 @@ public void testPeakMemoryUsed() throws Exception {
taskMemoryManager = spy(taskMemoryManager);
when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes);
final UnsafeShuffleWriter<Object, Object> writer =
new UnsafeShuffleWriter<Object, Object>(
new UnsafeShuffleWriter<>(
blockManager,
shuffleBlockResolver,
taskMemoryManager,
@@ -66,7 +66,7 @@ public abstract class AbstractBytesToBytesMapSuite {
private TaskMemoryManager taskMemoryManager;
private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes

final LinkedList<File> spillFilesCreated = new LinkedList<File>();
final LinkedList<File> spillFilesCreated = new LinkedList<>();
File tempDir;

@Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
@@ -397,7 +397,7 @@ public void randomizedStressTest() {
final int size = 65536;
// Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
// into ByteBuffers in order to use them as keys here.
final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
final Map<ByteBuffer, byte[]> expected = new HashMap<>();
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES);
try {
// Fill the map to 90% full so that we can trigger probing
@@ -453,7 +453,7 @@ public void randomizedTestWithRecordsLargerThanPageSize() {
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes);
// Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
// into ByteBuffers in order to use them as keys here.
final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
final Map<ByteBuffer, byte[]> expected = new HashMap<>();
try {
for (int i = 0; i < 1000; i++) {
final byte[] key = getRandomByteArray(rand.nextInt(128));
@@ -76,7 +76,7 @@ private static int[] createArray(List<Long> runs, int length) {
* @param length The sum of all run lengths that will be added to <code>runs</code>.
*/
private static List<Long> runsJDKWorstCase(int minRun, int length) {
List<Long> runs = new ArrayList<Long>();
List<Long> runs = new ArrayList<>();

long runningTotal = 0, Y = minRun + 4, X = minRun;

@@ -56,7 +56,7 @@

public class UnsafeExternalSorterSuite {

final LinkedList<File> spillFilesCreated = new LinkedList<File>();
final LinkedList<File> spillFilesCreated = new LinkedList<>();
final TestMemoryManager memoryManager =
new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false"));
final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);
4 changes: 2 additions & 2 deletions docs/sql-programming-guide.md
@@ -760,7 +760,7 @@ JavaRDD<String> people = sc.textFile("examples/src/main/resources/people.txt");
String schemaString = "name age";

// Generate the schema based on the string of schema
List<StructField> fields = new ArrayList<StructField>();
List<StructField> fields = new ArrayList<>();
for (String fieldName: schemaString.split(" ")) {
fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
}
@@ -1935,7 +1935,7 @@ val jdbcDF = sqlContext.read.format("jdbc").options(

{% highlight java %}

Map<String, String> options = new HashMap<String, String>();
Map<String, String> options = new HashMap<>();
options.put("url", "jdbc:postgresql:dbserver");
options.put("dbtable", "schema.tablename");

4 changes: 2 additions & 2 deletions docs/streaming-programming-guide.md
@@ -186,7 +186,7 @@ Next, we want to count these words.
JavaPairDStream<String, Integer> pairs = words.mapToPair(
new PairFunction<String, String, Integer>() {
@Override public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
});
JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
@@ -2095,7 +2095,7 @@ unifiedStream.print()
<div data-lang="java" markdown="1">
{% highlight java %}
int numStreams = 5;
List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<JavaPairDStream<String, String>>(numStreams);
List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<>(numStreams);
for (int i = 0; i < numStreams; i++) {
kafkaStreams.add(KafkaUtils.createStream(...));
}
@@ -108,7 +108,7 @@ public static void main(String[] args) {
JavaPairRDD<Tuple3<String, String, String>, Stats> extracted = dataSet.mapToPair(new PairFunction<String, Tuple3<String, String, String>, Stats>() {
@Override
public Tuple2<Tuple3<String, String, String>, Stats> call(String s) {
return new Tuple2<Tuple3<String, String, String>, Stats>(extractKey(s), extractStats(s));
return new Tuple2<>(extractKey(s), extractStats(s));
}
});

@@ -88,7 +88,7 @@ public static void main(String[] args) throws Exception {
@Override
public Tuple2<String, String> call(String s) {
String[] parts = SPACES.split(s);
return new Tuple2<String, String>(parts[0], parts[1]);
return new Tuple2<>(parts[0], parts[1]);
}
}).distinct().groupByKey().cache();

@@ -38,7 +38,7 @@ public static void main(String[] args) throws Exception {

int slices = (args.length == 1) ? Integer.parseInt(args[0]) : 2;
int n = 100000 * slices;
List<Integer> l = new ArrayList<Integer>(n);
List<Integer> l = new ArrayList<>(n);
for (int i = 0; i < n; i++) {
l.add(i);
}
10 changes: 5 additions & 5 deletions examples/src/main/java/org/apache/spark/examples/JavaTC.java
@@ -41,16 +41,16 @@ public final class JavaTC {
private static final Random rand = new Random(42);

static List<Tuple2<Integer, Integer>> generateGraph() {
Set<Tuple2<Integer, Integer>> edges = new HashSet<Tuple2<Integer, Integer>>(numEdges);
Set<Tuple2<Integer, Integer>> edges = new HashSet<>(numEdges);
while (edges.size() < numEdges) {
int from = rand.nextInt(numVertices);
int to = rand.nextInt(numVertices);
Tuple2<Integer, Integer> e = new Tuple2<Integer, Integer>(from, to);
Tuple2<Integer, Integer> e = new Tuple2<>(from, to);
if (from != to) {
edges.add(e);
}
}
return new ArrayList<Tuple2<Integer, Integer>>(edges);
return new ArrayList<>(edges);
}

static class ProjectFn implements PairFunction<Tuple2<Integer, Tuple2<Integer, Integer>>,
@@ -59,7 +59,7 @@ static class ProjectFn implements PairFunction<Tuple2<Integer, Tuple2<Integer, Integer>>,

@Override
public Tuple2<Integer, Integer> call(Tuple2<Integer, Tuple2<Integer, Integer>> triple) {
return new Tuple2<Integer, Integer>(triple._2()._2(), triple._2()._1());
return new Tuple2<>(triple._2()._2(), triple._2()._1());
}
}

@@ -79,7 +79,7 @@ public static void main(String[] args) {
new PairFunction<Tuple2<Integer, Integer>, Integer, Integer>() {
@Override
public Tuple2<Integer, Integer> call(Tuple2<Integer, Integer> e) {
return new Tuple2<Integer, Integer>(e._2(), e._1());
return new Tuple2<>(e._2(), e._1());
}
});

@@ -55,7 +55,7 @@ public Iterator<String> call(String s) {
JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
});

@@ -52,7 +52,7 @@ public static void main(String[] args) {
RowFactory.create("b", Vectors.dense(4.0, 5.0, 6.0))
));

List<StructField> fields = new ArrayList<StructField>(2);
List<StructField> fields = new ArrayList<>(2);
fields.add(DataTypes.createStructField("id", DataTypes.StringType, false));
fields.add(DataTypes.createStructField("vector", new VectorUDT(), false));

@@ -54,7 +54,7 @@ public static void main(String[] args) {
// Set parameters.
// Empty categoricalFeaturesInfo indicates all features are continuous.
Integer numClasses = 2;
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
String impurity = "gini";
Integer maxDepth = 5;
Integer maxBins = 32;
@@ -68,7 +68,7 @@ public static void main(String[] args) {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testErr =
@@ -54,7 +54,7 @@ public static void main(String[] args) {

// Set parameters.
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
String impurity = "variance";
Integer maxDepth = 5;
Integer maxBins = 32;
@@ -68,7 +68,7 @@ public static void main(String[] args) {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testMSE =
@@ -58,7 +58,7 @@ public static void main(String[] args) {
boostingStrategy.getTreeStrategy().setNumClasses(2);
boostingStrategy.getTreeStrategy().setMaxDepth(5);
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);

final GradientBoostedTreesModel model =
@@ -69,7 +69,7 @@ public static void main(String[] args) {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testErr =
@@ -57,7 +57,7 @@ public static void main(String[] args) {
boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice.
boostingStrategy.getTreeStrategy().setMaxDepth(5);
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);

final GradientBoostedTreesModel model =
@@ -68,7 +68,7 @@ public static void main(String[] args) {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testMSE =
@@ -62,7 +62,7 @@ public Tuple3<Double, Double, Double> call(String line) {
@Override
public Tuple2<Double, Double> call(Tuple3<Double, Double, Double> point) {
Double predictedLabel = model.predict(point._2());
return new Tuple2<Double, Double>(predictedLabel, point._1());
return new Tuple2<>(predictedLabel, point._1());
}
}
);
@@ -70,7 +70,7 @@ public LabeledPoint call(String line) {
new Function<LabeledPoint, Tuple2<Double, Double>>() {
public Tuple2<Double, Double> call(LabeledPoint point) {
double prediction = model.predict(point.features());
return new Tuple2<Double, Double>(prediction, point.label());
return new Tuple2<>(prediction, point.label());
}
}
);
@@ -46,7 +46,7 @@ public static void main(String[] args) {
test.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
double accuracy = predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>() {
@@ -42,7 +42,7 @@ public static void main(String[] args) {

// $example on$
double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
LinkedList<Vector> rowsList = new LinkedList<Vector>();
LinkedList<Vector> rowsList = new LinkedList<>();
for (int i = 0; i < array.length; i++) {
Vector currentRow = Vectors.dense(array[i]);
rowsList.add(currentRow);