[MINOR][MLLIB][SQL] Clean up unused variables and unused import
## What changes were proposed in this pull request?

Clean up unused variables and unused import statements, unnecessary `return` and `toArray`, and make some more style improvements noticed while walking through the code examples.
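
A minimal, self-contained sketch (hypothetical names, not code from this patch) of the kinds of cleanup applied below:

```scala
object CleanupSketch {
  // Unnecessary `return`: a Scala method already returns its last
  // expression, so `return new Model(...)` reduces to `new Model(...)`.
  def parity(n: Int): String = {
    if (n % 2 == 0) "even" else "odd" // was: return if (n % 2 == 0) ...
  }

  // Redundant `toArray`: mapping over an Array already yields an Array,
  // so a trailing `.toArray` only copies the result a second time.
  def doubled(attrs: Array[Int]): Array[Int] = attrs.map(_ * 2)

  // Unused imports, single-name import braces, and stray semicolons are
  // legal but noise, e.g. `import mutable.{ListBuffer};` vs. the form below.
  import scala.collection.mutable.ListBuffer

  def main(args: Array[String]): Unit = {
    println(parity(3))                              // odd
    println(doubled(Array(1, 2, 3)).mkString(","))  // 2,4,6
    println(ListBuffer(1, 2, 3).sum)                // 6
  }
}
```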

## How was this patch tested?

Tested manually on a local laptop.

Author: Xin Ren <[email protected]>

Closes apache#14836 from keypointt/codeWalkThroughML.
keypointt authored and srowen committed Aug 30, 2016
1 parent d4eee99 commit 2720925
Showing 10 changed files with 16 additions and 14 deletions.
6 changes: 4 additions & 2 deletions core/src/test/scala/org/apache/spark/AccumulatorSuite.scala
@@ -100,7 +100,9 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContext
    val acc: Accumulator[Int] = sc.accumulator(0)

    val d = sc.parallelize(1 to 20)
-   an [Exception] should be thrownBy {d.foreach{x => acc.value = x}}
+   intercept[SparkException] {
+     d.foreach(x => acc.value = x)
+   }
  }

  test ("add value to collection accumulators") {
@@ -171,7 +173,7 @@ class AccumulatorSuite extends SparkFunSuite with Matchers with LocalSparkContext
    d.foreach {
      x => acc.localValue ++= x
    }
-   acc.value should be ( (0 to maxI).toSet)
+   acc.value should be ((0 to maxI).toSet)
    resetSparkContext()
  }
}
@@ -136,7 +136,7 @@ class Interaction @Since("1.6.0") (@Since("1.6.0") override val uid: String) ext
      case _: VectorUDT =>
        val attrs = AttributeGroup.fromStructField(f).attributes.getOrElse(
          throw new SparkException("Vector attributes must be defined for interaction."))
-       attrs.map(getNumFeatures).toArray
+       attrs.map(getNumFeatures)
    }
    new FeatureEncoder(numFeatures)
  }.toArray
@@ -23,7 +23,7 @@ import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import org.apache.spark.ml.{Pipeline, PipelineModel}
-import org.apache.spark.ml.attribute.{AttributeGroup}
+import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.regression.{IsotonicRegression, IsotonicRegressionModel}
import org.apache.spark.ml.util._
@@ -20,7 +20,7 @@ package org.apache.spark.ml.util
import scala.collection.mutable

import org.apache.spark.SparkContext
-import org.apache.spark.util.LongAccumulator;
+import org.apache.spark.util.LongAccumulator

/**
 * Abstract class for stopwatches.
@@ -164,7 +164,7 @@ object ChiSqSelectorModel extends Loader[ChiSqSelectorModel] {
        case Row(feature: Int) => (feature)
      }.collect()

-     return new ChiSqSelectorModel(features)
+     new ChiSqSelectorModel(features)
    }
  }
}
@@ -438,10 +438,10 @@ object RandomRDDs {
  @DeveloperApi
  @Since("1.6.0")
  def randomJavaRDD[T](
-      jsc: JavaSparkContext,
-      generator: RandomDataGenerator[T],
-      size: Long): JavaRDD[T] = {
-    randomJavaRDD(jsc, generator, size, 0);
+      jsc: JavaSparkContext,
+      generator: RandomDataGenerator[T],
+      size: Long): JavaRDD[T] = {
+    randomJavaRDD(jsc, generator, size, 0)
  }

  // TODO Generate RDD[Vector] from multivariate distributions.
@@ -26,7 +26,7 @@ import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.LogicalRDD
-import org.apache.spark.sql.execution.datasources.{DataSource, LogicalRelation}
+import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.jdbc.{JDBCPartition, JDBCPartitioningInfo, JDBCRelation}
import org.apache.spark.sql.execution.datasources.json.{InferSchema, JacksonParser, JSONOptions}
import org.apache.spark.sql.types.StructType
2 changes: 1 addition & 1 deletion sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -43,7 +43,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.usePrettyExpression
import org.apache.spark.sql.execution.{FileRelation, LogicalRDD, QueryExecution, SQLExecution}
import org.apache.spark.sql.execution.command.{CreateViewCommand, ExplainCommand}
-import org.apache.spark.sql.execution.datasources.{CreateTable, LogicalRelation}
+import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.datasources.json.JacksonGenerator
import org.apache.spark.sql.execution.python.EvaluatePython
import org.apache.spark.sql.streaming.{DataStreamWriter, StreamingQuery}
@@ -1093,7 +1093,7 @@ object SQLContext {
    }
    data.map{ element =>
      new GenericInternalRow(
-       methodsToConverts.map { case (e, convert) => convert(e.invoke(element)) }.toArray[Any]
+       methodsToConverts.map { case (e, convert) => convert(e.invoke(element)) }
      ): InternalRow
    }
  }
@@ -61,7 +61,7 @@ import org.apache.spark.util.Utils
 * qualified. This option only works when reading from a [[FileFormat]].
 * @param userSpecifiedSchema An optional specification of the schema of the data. When present
 *                            we skip attempting to infer the schema.
- * @param partitionColumns A list of column names that the relation is partitioned by. When this
+ * @param partitionColumns A list of column names that the relation is partitioned by. When this
 *                         list is empty, the relation is unpartitioned.
 * @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
 */
