[SPARK-13017][DOCS] Replace example code in mllib-feature-extraction.md using include_example

https://issues.apache.org/jira/browse/SPARK-13017

The example code in the user guide is embedded in the markdown and hence is not easy to test. It would be nice to test it automatically. This JIRA is to discuss options to automate example code testing and see what we can do in Spark 1.6. The goal is to move the actual example code to spark/examples and test compilation in Jenkins builds. Then, in the markdown, we can reference part of the code to show in the user guide. This requires adding a Jekyll tag similar to https://github.com/jekyll/jekyll/blob/master/lib/jekyll/tags/include.rb, e.g. called include_example:

{% include_example scala/org/apache/spark/examples/mllib/TFIDFExample.scala %}

Jekyll will find examples/src/main/scala/org/apache/spark/examples/mllib/TFIDFExample.scala, pick the code blocks marked "example", and replace the code block inside {% highlight %} in the markdown.

See more sub-tasks in the parent ticket: https://issues.apache.org/jira/browse/SPARK-11337

Author: Xin Ren <[email protected]>

Closes apache#11142 from keypointt/SPARK-13017.
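In practice, after this change a user-guide section embeds one of the new files with a single tag; the following is a sketch of the intended usage, inferred from the mechanism described above (the path is resolved relative to examples/src/main/, as the TFIDFExample reference illustrates):

{% include_example python/mllib/tf_idf_example.py %}

Only the region between the $example on$ and $example off$ markers of the referenced file is rendered into the guide's {% highlight %} block, so the license header and SparkContext setup stay out of the docs.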
Showing 15 changed files with 861 additions and 361 deletions.
83 changes: 83 additions & 0 deletions
examples/src/main/java/org/apache/spark/examples/mllib/JavaChiSqSelectorExample.java
@@ -0,0 +1,83 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.mllib;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.VoidFunction;
// $example on$
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.mllib.feature.ChiSqSelector;
import org.apache.spark.mllib.feature.ChiSqSelectorModel;
import org.apache.spark.mllib.linalg.Vectors;
import org.apache.spark.mllib.regression.LabeledPoint;
import org.apache.spark.mllib.util.MLUtils;
// $example off$

public class JavaChiSqSelectorExample {
  public static void main(String[] args) {

    SparkConf conf = new SparkConf().setAppName("JavaChiSqSelectorExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // $example on$
    JavaRDD<LabeledPoint> points = MLUtils.loadLibSVMFile(jsc.sc(),
      "data/mllib/sample_libsvm_data.txt").toJavaRDD().cache();

    // Discretize data into 16 equal bins, since ChiSqSelector requires categorical features.
    // Although features are doubles, ChiSqSelector treats each unique value as a category.
    JavaRDD<LabeledPoint> discretizedData = points.map(
      new Function<LabeledPoint, LabeledPoint>() {
        @Override
        public LabeledPoint call(LabeledPoint lp) {
          final double[] discretizedFeatures = new double[lp.features().size()];
          for (int i = 0; i < lp.features().size(); ++i) {
            discretizedFeatures[i] = Math.floor(lp.features().apply(i) / 16);
          }
          return new LabeledPoint(lp.label(), Vectors.dense(discretizedFeatures));
        }
      }
    );

    // Create a ChiSqSelector that will select the top 50 of 692 features.
    ChiSqSelector selector = new ChiSqSelector(50);
    // Create a ChiSqSelector model (selecting features).
    final ChiSqSelectorModel transformer = selector.fit(discretizedData.rdd());
    // Filter the top 50 features from each feature vector.
    JavaRDD<LabeledPoint> filteredData = discretizedData.map(
      new Function<LabeledPoint, LabeledPoint>() {
        @Override
        public LabeledPoint call(LabeledPoint lp) {
          return new LabeledPoint(lp.label(), transformer.transform(lp.features()));
        }
      }
    );
    // $example off$

    System.out.println("filtered data: ");
    filteredData.foreach(new VoidFunction<LabeledPoint>() {
      @Override
      public void call(LabeledPoint labeledPoint) throws Exception {
        System.out.println(labeledPoint.toString());
      }
    });

    jsc.stop();
  }
}
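The Math.floor(x / 16) step above is plain fixed-width binning: assuming raw feature values in [0, 255] (as in sample_libsvm_data.txt), it maps each value onto one of 16 categories, 0 through 15. A quick plain-Python check of the arithmetic, independent of Spark:

import math

# Fixed-width binning: values 0-255 fall into bins 0-15 (16 bins of width 16)
for value in [0, 15, 16, 130, 255]:
    print(value, "->", math.floor(value / 16))
# 0 -> 0, 15 -> 0, 16 -> 1, 130 -> 8, 255 -> 15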
78 changes: 78 additions & 0 deletions
examples/src/main/java/org/apache/spark/examples/mllib/JavaElementwiseProductExample.java
@@ -0,0 +1,78 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.examples.mllib;

// $example on$
import java.util.Arrays;
// $example off$

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
// $example on$
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.mllib.feature.ElementwiseProduct;
import org.apache.spark.mllib.linalg.Vector;
import org.apache.spark.mllib.linalg.Vectors;
// $example off$
import org.apache.spark.api.java.function.VoidFunction;

public class JavaElementwiseProductExample {
  public static void main(String[] args) {

    SparkConf conf = new SparkConf().setAppName("JavaElementwiseProductExample");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // $example on$
    // Create some vector data; also works for sparse vectors.
    JavaRDD<Vector> data = jsc.parallelize(Arrays.asList(
      Vectors.dense(1.0, 2.0, 3.0), Vectors.dense(4.0, 5.0, 6.0)));
    Vector transformingVector = Vectors.dense(0.0, 1.0, 2.0);
    final ElementwiseProduct transformer = new ElementwiseProduct(transformingVector);

    // Batch transform and per-row transform give the same results:
    JavaRDD<Vector> transformedData = transformer.transform(data);
    JavaRDD<Vector> transformedData2 = data.map(
      new Function<Vector, Vector>() {
        @Override
        public Vector call(Vector v) {
          return transformer.transform(v);
        }
      }
    );
    // $example off$

    System.out.println("transformedData: ");
    transformedData.foreach(new VoidFunction<Vector>() {
      @Override
      public void call(Vector vector) throws Exception {
        System.out.println(vector.toString());
      }
    });

    System.out.println("transformedData2: ");
    transformedData2.foreach(new VoidFunction<Vector>() {
      @Override
      public void call(Vector vector) throws Exception {
        System.out.println(vector.toString());
      }
    });

    jsc.stop();
  }
}
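The expected output of the batch transform above can be checked by hand, since the element-wise (Hadamard) product simply multiplies each input vector component-wise by the weight vector. A plain-Python sketch of the same computation, independent of Spark:

# Hadamard product of each row with the weight vector (0.0, 1.0, 2.0)
weights = [0.0, 1.0, 2.0]
rows = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
for row in rows:
    print([w * x for w, x in zip(weights, row)])
# [0.0, 2.0, 6.0]
# [0.0, 5.0, 12.0]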
51 changes: 51 additions & 0 deletions
examples/src/main/python/mllib/elementwise_product_example.py
@@ -0,0 +1,51 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import ElementwiseProduct
from pyspark.mllib.linalg import Vectors
# $example off$

if __name__ == "__main__":
    sc = SparkContext(appName="ElementwiseProductExample")  # SparkContext

    # $example on$
    data = sc.textFile("data/mllib/kmeans_data.txt")
    parsedData = data.map(lambda x: [float(t) for t in x.split(" ")])

    # Create weight vector.
    transformingVector = Vectors.dense([0.0, 1.0, 2.0])
    transformer = ElementwiseProduct(transformingVector)

    # Batch transform
    transformedData = transformer.transform(parsedData)
    # Single-row transform; transforming one vector returns a vector, not an RDD
    transformedData2 = transformer.transform(parsedData.first())
    # $example off$

    print("transformedData:")
    for each in transformedData.collect():
        print(each)

    print("transformedData2:")
    print(transformedData2)

    sc.stop()
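Assuming the examples are launched from the root of a Spark checkout (so the relative data paths resolve), the Python files can be run with spark-submit and the Java ones with run-example, e.g.:

bin/spark-submit examples/src/main/python/mllib/elementwise_product_example.py
bin/run-example mllib.JavaElementwiseProductExample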
52 changes: 52 additions & 0 deletions
examples/src/main/python/mllib/normalizer_example.py
@@ -0,0 +1,52 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import Normalizer
from pyspark.mllib.util import MLUtils
# $example off$

if __name__ == "__main__":
    sc = SparkContext(appName="NormalizerExample")  # SparkContext

    # $example on$
    data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    labels = data.map(lambda x: x.label)
    features = data.map(lambda x: x.features)

    normalizer1 = Normalizer()
    normalizer2 = Normalizer(p=float("inf"))

    # Each sample in data1 will be normalized using $L^2$ norm.
    data1 = labels.zip(normalizer1.transform(features))

    # Each sample in data2 will be normalized using $L^\infty$ norm.
    data2 = labels.zip(normalizer2.transform(features))
    # $example off$

    print("data1:")
    for each in data1.collect():
        print(each)

    print("data2:")
    for each in data2.collect():
        print(each)

    sc.stop()
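Concretely, Normalizer() scales each sample to unit $L^2$ norm (dividing by the Euclidean norm), while Normalizer(p=float("inf")) divides by the largest absolute component. A small plain-Python check of both norms:

import math

v = [1.0, 2.0, 2.0]
l2 = math.sqrt(sum(x * x for x in v))  # 3.0
linf = max(abs(x) for x in v)          # 2.0
print([x / l2 for x in v])    # approx [0.33, 0.67, 0.67] -- unit L^2 norm
print([x / linf for x in v])  # [0.5, 1.0, 1.0] -- unit L^inf norm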
55 changes: 55 additions & 0 deletions
examples/src/main/python/mllib/standard_scaler_example.py
@@ -0,0 +1,55 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import StandardScaler, StandardScalerModel
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.util import MLUtils
# $example off$

if __name__ == "__main__":
    sc = SparkContext(appName="StandardScalerExample")  # SparkContext

    # $example on$
    data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    label = data.map(lambda x: x.label)
    features = data.map(lambda x: x.features)

    scaler1 = StandardScaler().fit(features)
    scaler2 = StandardScaler(withMean=True, withStd=True).fit(features)

    # data1 will be unit variance.
    data1 = label.zip(scaler1.transform(features))

    # Without converting the features into dense vectors, a transformation with zero mean
    # will raise an exception on sparse vectors.
    # data2 will be unit variance and zero mean.
    data2 = label.zip(scaler2.transform(features.map(lambda x: Vectors.dense(x.toArray()))))
    # $example off$

    print("data1:")
    for each in data1.collect():
        print(each)

    print("data2:")
    for each in data2.collect():
        print(each)

    sc.stop()
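The dense-vector conversion in the last step exists because subtracting the per-feature mean turns the implicit zeros of a sparse vector into non-zero entries, so the result cannot stay sparse. A minimal plain-Python illustration of why withMean=True densifies:

# A "sparse" vector [0, 0, 5] with per-feature means [1, 2, 3]:
x = [0.0, 0.0, 5.0]
mean = [1.0, 2.0, 3.0]
print([xi - mi for xi, mi in zip(x, mean)])  # [-1.0, -2.0, 2.0] -- the zeros are gone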
57 changes: 57 additions & 0 deletions
examples/src/main/python/mllib/tf_idf_example.py
@@ -0,0 +1,57 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

from pyspark import SparkContext
# $example on$
from pyspark.mllib.feature import HashingTF, IDF
# $example off$

if __name__ == "__main__":
    sc = SparkContext(appName="TFIDFExample")  # SparkContext

    # $example on$
    # Load documents (one per line).
    documents = sc.textFile("data/mllib/kmeans_data.txt").map(lambda line: line.split(" "))

    hashingTF = HashingTF()
    tf = hashingTF.transform(documents)

    # While applying HashingTF only needs a single pass over the data, applying IDF needs two:
    # first to compute the IDF vector, and second to scale the term frequencies by IDF.
    tf.cache()
    idf = IDF().fit(tf)
    tfidf = idf.transform(tf)

    # spark.mllib's IDF implementation provides an option for ignoring terms
    # which occur in fewer than a minimum number of documents.
    # In such cases, the IDF for these terms is set to 0.
    # This feature can be used by passing the minDocFreq value to the IDF constructor.
    idfIgnore = IDF(minDocFreq=2).fit(tf)
    tfidfIgnore = idfIgnore.transform(tf)
    # $example off$

    print("tfidf:")
    for each in tfidf.collect():
        print(each)

    print("tfidfIgnore:")
    for each in tfidfIgnore.collect():
        print(each)

    sc.stop()
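For reference, spark.mllib's documented IDF is the smoothed formula IDF(t) = log((m + 1) / (df(t) + 1)), where m is the number of documents and df(t) the number of documents containing term t. A quick plain-Python check with made-up counts:

import math

m = 4   # total number of documents (hypothetical)
df = 1  # documents containing the term (hypothetical)
print(math.log((m + 1) / (df + 1)))  # log(2.5) ~ 0.916
# With minDocFreq=2, a term appearing in only 1 document gets IDF = 0 instead.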