Repository: spark
Updated Branches:
  refs/heads/master e328b69c3 -> 820064e61


[SPARK-11380][DOCS] Replace example code in mllib-frequent-pattern-mining.md using include_example

Author: Pravin Gadakh <pravingadakh...@gmail.com>
Author: Pravin Gadakh <prgad...@in.ibm.com>

Closes #9340 from pravingadakh/SPARK-11380.
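
For context on the mechanism: `include_example` is a Jekyll tag handled by a plugin under docs/_plugins in the docs build. It pulls the region between `$example on$` and `$example off$` markers out of a source file under examples/ and renders it in the page, so the published docs and the runnable examples no longer drift apart. A minimal sketch of the pattern, with a shortened body for illustration (the real file referenced below, SimpleFPGrowth.scala, is added by this commit):

In the markdown page:

    {% include_example scala/org/apache/spark/examples/mllib/SimpleFPGrowth.scala %}

In the referenced Scala example, only the marked region appears in the rendered docs:

    // $example on$
    val fpg = new FPGrowth()
      .setMinSupport(0.2)   // only lines between the markers are included
    // $example off$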


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/820064e6
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/820064e6
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/820064e6

Branch: refs/heads/master
Commit: 820064e613609bbf7edd726d982da1de60bf417a
Parents: e328b69
Author: Pravin Gadakh <pravingadakh...@gmail.com>
Authored: Wed Nov 4 08:32:08 2015 -0800
Committer: Xiangrui Meng <m...@databricks.com>
Committed: Wed Nov 4 08:32:08 2015 -0800

----------------------------------------------------------------------
 docs/mllib-frequent-pattern-mining.md           | 168 +------------------
 .../mllib/JavaAssociationRulesExample.java      |  56 +++++++
 .../examples/mllib/JavaPrefixSpanExample.java   |  55 ++++++
 .../examples/mllib/JavaSimpleFPGrowth.java      |  71 ++++++++
 .../src/main/python/mllib/fpgrowth_example.py   |  33 ++++
 .../mllib/AssociationRulesExample.scala         |  54 ++++++
 .../examples/mllib/PrefixSpanExample.scala      |  52 ++++++
 .../spark/examples/mllib/SimpleFPGrowth.scala   |  59 +++++++
 8 files changed, 387 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/docs/mllib-frequent-pattern-mining.md
----------------------------------------------------------------------
diff --git a/docs/mllib-frequent-pattern-mining.md b/docs/mllib-frequent-pattern-mining.md
index f749eb4..fe42896 100644
--- a/docs/mllib-frequent-pattern-mining.md
+++ b/docs/mllib-frequent-pattern-mining.md
@@ -52,31 +52,7 @@ details) from `transactions`.
 
 Refer to the [`FPGrowth` Scala docs](api/scala/index.html#org.apache.spark.mllib.fpm.FPGrowth) for details on the API.
 
-{% highlight scala %}
-import org.apache.spark.rdd.RDD
-import org.apache.spark.mllib.fpm.FPGrowth
-
-val data = sc.textFile("data/mllib/sample_fpgrowth.txt")
-
-val transactions: RDD[Array[String]] = data.map(s => s.trim.split(' '))
-
-val fpg = new FPGrowth()
-  .setMinSupport(0.2)
-  .setNumPartitions(10)
-val model = fpg.run(transactions)
-
-model.freqItemsets.collect().foreach { itemset =>
-  println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
-}
-
-val minConfidence = 0.8
-model.generateAssociationRules(minConfidence).collect().foreach { rule =>
-  println(
-    rule.antecedent.mkString("[", ",", "]")
-      + " => " + rule.consequent .mkString("[", ",", "]")
-      + ", " + rule.confidence)
-}
-{% endhighlight %}
+{% include_example scala/org/apache/spark/examples/mllib/SimpleFPGrowth.scala %}
 
 </div>
 
@@ -95,46 +71,7 @@ details) from `transactions`.
 
 Refer to the [`FPGrowth` Java docs](api/java/org/apache/spark/mllib/fpm/FPGrowth.html) for details on the API.
 
-{% highlight java %}
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.mllib.fpm.AssociationRules;
-import org.apache.spark.mllib.fpm.FPGrowth;
-import org.apache.spark.mllib.fpm.FPGrowthModel;
-
-SparkConf conf = new SparkConf().setAppName("FP-growth Example");
-JavaSparkContext sc = new JavaSparkContext(conf);
-
-JavaRDD<String> data = sc.textFile("data/mllib/sample_fpgrowth.txt");
-
-JavaRDD<List<String>> transactions = data.map(
-  new Function<String, List<String>>() {
-    public List<String> call(String line) {
-      String[] parts = line.split(" ");
-      return Arrays.asList(parts);
-    }
-  }
-);
-
-FPGrowth fpg = new FPGrowth()
-  .setMinSupport(0.2)
-  .setNumPartitions(10);
-FPGrowthModel<String> model = fpg.run(transactions);
-
-for (FPGrowth.FreqItemset<String> itemset: model.freqItemsets().toJavaRDD().collect()) {
-  System.out.println("[" + itemset.javaItems() + "], " + itemset.freq());
-}
-
-double minConfidence = 0.8;
-for (AssociationRules.Rule<String> rule
-    : model.generateAssociationRules(minConfidence).toJavaRDD().collect()) {
-  System.out.println(
-    rule.javaAntecedent() + " => " + rule.javaConsequent() + ", " + rule.confidence());
-}
-{% endhighlight %}
+{% include_example java/org/apache/spark/examples/mllib/JavaSimpleFPGrowth.java %}
 
 </div>
 
@@ -149,19 +86,7 @@ that stores the frequent itemsets with their frequencies.
 
 Refer to the [`FPGrowth` Python docs](api/python/pyspark.mllib.html#pyspark.mllib.fpm.FPGrowth) for more details on the API.
 
-{% highlight python %}
-from pyspark.mllib.fpm import FPGrowth
-
-data = sc.textFile("data/mllib/sample_fpgrowth.txt")
-
-transactions = data.map(lambda line: line.strip().split(' '))
-
-model = FPGrowth.train(transactions, minSupport=0.2, numPartitions=10)
-
-result = model.freqItemsets().collect()
-for fi in result:
-    print(fi)
-{% endhighlight %}
+{% include_example python/mllib/fpgrowth_example.py %}
 
 </div>
 
@@ -177,27 +102,7 @@ that have a single item as the consequent.
 
 Refer to the [`AssociationRules` Scala docs](api/java/org/apache/spark/mllib/fpm/AssociationRules.html) for details on the API.
 
-{% highlight scala %}
-import org.apache.spark.rdd.RDD
-import org.apache.spark.mllib.fpm.AssociationRules
-import org.apache.spark.mllib.fpm.FPGrowth.FreqItemset
-
-val freqItemsets = sc.parallelize(Seq(
-  new FreqItemset(Array("a"), 15L),
-  new FreqItemset(Array("b"), 35L),
-  new FreqItemset(Array("a", "b"), 12L)
-));
-
-val ar = new AssociationRules()
-  .setMinConfidence(0.8)
-val results = ar.run(freqItemsets)
-
-results.collect().foreach { rule =>
-  println("[" + rule.antecedent.mkString(",")
-    + "=>"
-    + rule.consequent.mkString(",") + "]," + rule.confidence)
-}
-{% endhighlight %}
+{% include_example scala/org/apache/spark/examples/mllib/AssociationRulesExample.scala %}
 
 </div>
 
@@ -208,29 +113,7 @@ that have a single item as the consequent.
 
 Refer to the [`AssociationRules` Java docs](api/java/org/apache/spark/mllib/fpm/AssociationRules.html) for details on the API.
 
-{% highlight java %}
-import java.util.Arrays;
-
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.mllib.fpm.AssociationRules;
-import org.apache.spark.mllib.fpm.FPGrowth.FreqItemset;
-
-JavaRDD<FPGrowth.FreqItemset<String>> freqItemsets = sc.parallelize(Arrays.asList(
-  new FreqItemset<String>(new String[] {"a"}, 15L),
-  new FreqItemset<String>(new String[] {"b"}, 35L),
-  new FreqItemset<String>(new String[] {"a", "b"}, 12L)
-));
-
-AssociationRules arules = new AssociationRules()
-  .setMinConfidence(0.8);
-JavaRDD<AssociationRules.Rule<String>> results = arules.run(freqItemsets);
-
-for (AssociationRules.Rule<String> rule: results.collect()) {
-  System.out.println(
-    rule.javaAntecedent() + " => " + rule.javaConsequent() + ", " + rule.confidence());
-}
-{% endhighlight %}
+{% include_example java/org/apache/spark/examples/mllib/JavaAssociationRulesExample.java %}
 
 </div>
 </div>
@@ -278,24 +161,7 @@ that stores the frequent sequences with their frequencies.
 
 Refer to the [`PrefixSpan` Scala docs](api/scala/index.html#org.apache.spark.mllib.fpm.PrefixSpan) and [`PrefixSpanModel` Scala docs](api/scala/index.html#org.apache.spark.mllib.fpm.PrefixSpanModel) for details on the API.
 
-{% highlight scala %}
-import org.apache.spark.mllib.fpm.PrefixSpan
-
-val sequences = sc.parallelize(Seq(
-    Array(Array(1, 2), Array(3)),
-    Array(Array(1), Array(3, 2), Array(1, 2)),
-    Array(Array(1, 2), Array(5)),
-    Array(Array(6))
-  ), 2).cache()
-val prefixSpan = new PrefixSpan()
-  .setMinSupport(0.5)
-  .setMaxPatternLength(5)
-val model = prefixSpan.run(sequences)
-model.freqSequences.collect().foreach { freqSequence =>
-println(
-  freqSequence.sequence.map(_.mkString("[", ", ", "]")).mkString("[", ", ", "]") + ", " + freqSequence.freq)
-}
-{% endhighlight %}
+{% include_example scala/org/apache/spark/examples/mllib/PrefixSpanExample.scala %}
 
 </div>
 
@@ -309,27 +175,7 @@ that stores the frequent sequences with their frequencies.
 
 Refer to the [`PrefixSpan` Java docs](api/java/org/apache/spark/mllib/fpm/PrefixSpan.html) and [`PrefixSpanModel` Java docs](api/java/org/apache/spark/mllib/fpm/PrefixSpanModel.html) for details on the API.
 
-{% highlight java %}
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.spark.mllib.fpm.PrefixSpan;
-import org.apache.spark.mllib.fpm.PrefixSpanModel;
-
-JavaRDD<List<List<Integer>>> sequences = sc.parallelize(Arrays.asList(
-  Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3)),
-  Arrays.asList(Arrays.asList(1), Arrays.asList(3, 2), Arrays.asList(1, 2)),
-  Arrays.asList(Arrays.asList(1, 2), Arrays.asList(5)),
-  Arrays.asList(Arrays.asList(6))
-), 2);
-PrefixSpan prefixSpan = new PrefixSpan()
-  .setMinSupport(0.5)
-  .setMaxPatternLength(5);
-PrefixSpanModel<Integer> model = prefixSpan.run(sequences);
-for (PrefixSpan.FreqSequence<Integer> freqSeq: model.freqSequences().toJavaRDD().collect()) {
-  System.out.println(freqSeq.javaSequence() + ", " + freqSeq.freq());
-}
-{% endhighlight %}
+{% include_example java/org/apache/spark/examples/mllib/JavaPrefixSpanExample.java %}
 
 </div>
 </div>

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/java/org/apache/spark/examples/mllib/JavaAssociationRulesExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaAssociationRulesExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaAssociationRulesExample.java
new file mode 100644
index 0000000..4d0f989
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaAssociationRulesExample.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.mllib;
+
+// $example on$
+import java.util.Arrays;
+
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.mllib.fpm.AssociationRules;
+import org.apache.spark.mllib.fpm.FPGrowth;
+import org.apache.spark.mllib.fpm.FPGrowth.FreqItemset;
+// $example off$
+
+import org.apache.spark.SparkConf;
+
+public class JavaAssociationRulesExample {
+
+  public static void main(String[] args) {
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaAssociationRulesExample");
+    JavaSparkContext sc = new JavaSparkContext(sparkConf);
+
+    // $example on$
+    JavaRDD<FPGrowth.FreqItemset<String>> freqItemsets = sc.parallelize(Arrays.asList(
+      new FreqItemset<String>(new String[] {"a"}, 15L),
+      new FreqItemset<String>(new String[] {"b"}, 35L),
+      new FreqItemset<String>(new String[] {"a", "b"}, 12L)
+    ));
+
+    AssociationRules arules = new AssociationRules()
+      .setMinConfidence(0.8);
+    JavaRDD<AssociationRules.Rule<String>> results = arules.run(freqItemsets);
+
+    for (AssociationRules.Rule<String> rule : results.collect()) {
+      System.out.println(
+        rule.javaAntecedent() + " => " + rule.javaConsequent() + ", " + rule.confidence());
+    }
+    // $example off$
+  }
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/java/org/apache/spark/examples/mllib/JavaPrefixSpanExample.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaPrefixSpanExample.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPrefixSpanExample.java
new file mode 100644
index 0000000..68ec7c1
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaPrefixSpanExample.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.mllib;
+
+// $example on$
+import java.util.Arrays;
+import java.util.List;
+// $example off$
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+// $example on$
+import org.apache.spark.mllib.fpm.PrefixSpan;
+import org.apache.spark.mllib.fpm.PrefixSpanModel;
+// $example off$
+import org.apache.spark.SparkConf;
+
+public class JavaPrefixSpanExample {
+
+  public static void main(String[] args) {
+
+    SparkConf sparkConf = new SparkConf().setAppName("JavaPrefixSpanExample");
+    JavaSparkContext sc = new JavaSparkContext(sparkConf);
+
+    // $example on$
+    JavaRDD<List<List<Integer>>> sequences = sc.parallelize(Arrays.asList(
+      Arrays.asList(Arrays.asList(1, 2), Arrays.asList(3)),
+      Arrays.asList(Arrays.asList(1), Arrays.asList(3, 2), Arrays.asList(1, 2)),
+      Arrays.asList(Arrays.asList(1, 2), Arrays.asList(5)),
+      Arrays.asList(Arrays.asList(6))
+    ), 2);
+    PrefixSpan prefixSpan = new PrefixSpan()
+      .setMinSupport(0.5)
+      .setMaxPatternLength(5);
+    PrefixSpanModel<Integer> model = prefixSpan.run(sequences);
+    for (PrefixSpan.FreqSequence<Integer> freqSeq: model.freqSequences().toJavaRDD().collect()) {
+      System.out.println(freqSeq.javaSequence() + ", " + freqSeq.freq());
+    }
+    // $example off$
+  }
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/java/org/apache/spark/examples/mllib/JavaSimpleFPGrowth.java
----------------------------------------------------------------------
diff --git a/examples/src/main/java/org/apache/spark/examples/mllib/JavaSimpleFPGrowth.java b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSimpleFPGrowth.java
new file mode 100644
index 0000000..72edaca
--- /dev/null
+++ b/examples/src/main/java/org/apache/spark/examples/mllib/JavaSimpleFPGrowth.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.examples.mllib;
+
+// $example on$
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+// $example off$
+import org.apache.spark.api.java.function.Function;
+// $example on$
+import org.apache.spark.mllib.fpm.AssociationRules;
+import org.apache.spark.mllib.fpm.FPGrowth;
+import org.apache.spark.mllib.fpm.FPGrowthModel;
+// $example off$
+
+import org.apache.spark.SparkConf;
+
+public class JavaSimpleFPGrowth {
+
+  public static void main(String[] args) {
+    SparkConf conf = new SparkConf().setAppName("FP-growth Example");
+    JavaSparkContext sc = new JavaSparkContext(conf);
+
+    // $example on$
+    JavaRDD<String> data = sc.textFile("data/mllib/sample_fpgrowth.txt");
+
+    JavaRDD<List<String>> transactions = data.map(
+      new Function<String, List<String>>() {
+        public List<String> call(String line) {
+          String[] parts = line.split(" ");
+          return Arrays.asList(parts);
+        }
+      }
+    );
+
+    FPGrowth fpg = new FPGrowth()
+      .setMinSupport(0.2)
+      .setNumPartitions(10);
+    FPGrowthModel<String> model = fpg.run(transactions);
+
+    for (FPGrowth.FreqItemset<String> itemset: model.freqItemsets().toJavaRDD().collect()) {
+      System.out.println("[" + itemset.javaItems() + "], " + itemset.freq());
+    }
+
+    double minConfidence = 0.8;
+    for (AssociationRules.Rule<String> rule
+      : model.generateAssociationRules(minConfidence).toJavaRDD().collect()) {
+      System.out.println(
+        rule.javaAntecedent() + " => " + rule.javaConsequent() + ", " + rule.confidence());
+    }
+    // $example off$
+  }
+}

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/python/mllib/fpgrowth_example.py
----------------------------------------------------------------------
diff --git a/examples/src/main/python/mllib/fpgrowth_example.py b/examples/src/main/python/mllib/fpgrowth_example.py
new file mode 100644
index 0000000..715f526
--- /dev/null
+++ b/examples/src/main/python/mllib/fpgrowth_example.py
@@ -0,0 +1,33 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# $example on$
+from pyspark.mllib.fpm import FPGrowth
+# $example off$
+from pyspark import SparkContext
+
+if __name__ == "__main__":
+    sc = SparkContext(appName="FPGrowth")
+
+    # $example on$
+    data = sc.textFile("data/mllib/sample_fpgrowth.txt")
+    transactions = data.map(lambda line: line.strip().split(' '))
+    model = FPGrowth.train(transactions, minSupport=0.2, numPartitions=10)
+    result = model.freqItemsets().collect()
+    for fi in result:
+        print(fi)
+    # $example off$

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/scala/org/apache/spark/examples/mllib/AssociationRulesExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/AssociationRulesExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/AssociationRulesExample.scala
new file mode 100644
index 0000000..ca22dda
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/AssociationRulesExample.scala
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.mllib
+
+// $example on$
+import org.apache.spark.mllib.fpm.AssociationRules
+import org.apache.spark.mllib.fpm.FPGrowth.FreqItemset
+// $example off$
+
+import org.apache.spark.{SparkConf, SparkContext}
+
+object AssociationRulesExample {
+
+  def main(args: Array[String]) {
+    val conf = new SparkConf().setAppName("AssociationRulesExample")
+    val sc = new SparkContext(conf)
+
+    // $example on$
+    val freqItemsets = sc.parallelize(Seq(
+      new FreqItemset(Array("a"), 15L),
+      new FreqItemset(Array("b"), 35L),
+      new FreqItemset(Array("a", "b"), 12L)
+    ))
+
+    val ar = new AssociationRules()
+      .setMinConfidence(0.8)
+    val results = ar.run(freqItemsets)
+
+    results.collect().foreach { rule =>
+      println("[" + rule.antecedent.mkString(",")
+        + "=>"
+        + rule.consequent.mkString(",") + "]," + rule.confidence)
+    }
+    // $example off$
+  }
+
+}
+// scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/scala/org/apache/spark/examples/mllib/PrefixSpanExample.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/PrefixSpanExample.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/PrefixSpanExample.scala
new file mode 100644
index 0000000..d237232
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/PrefixSpanExample.scala
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.mllib
+
+// $example on$
+import org.apache.spark.mllib.fpm.PrefixSpan
+// $example off$
+
+import org.apache.spark.{SparkConf, SparkContext}
+
+object PrefixSpanExample {
+
+  def main(args: Array[String]) {
+    val conf = new SparkConf().setAppName("PrefixSpanExample")
+    val sc = new SparkContext(conf)
+
+    // $example on$
+    val sequences = sc.parallelize(Seq(
+      Array(Array(1, 2), Array(3)),
+      Array(Array(1), Array(3, 2), Array(1, 2)),
+      Array(Array(1, 2), Array(5)),
+      Array(Array(6))
+    ), 2).cache()
+    val prefixSpan = new PrefixSpan()
+      .setMinSupport(0.5)
+      .setMaxPatternLength(5)
+    val model = prefixSpan.run(sequences)
+    model.freqSequences.collect().foreach { freqSequence =>
+      println(
+        freqSequence.sequence.map(_.mkString("[", ", ", "]")).mkString("[", ", ", "]") +
+          ", " + freqSequence.freq)
+    }
+    // $example off$
+  }
+}
+// scalastyle:on println

http://git-wip-us.apache.org/repos/asf/spark/blob/820064e6/examples/src/main/scala/org/apache/spark/examples/mllib/SimpleFPGrowth.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/spark/examples/mllib/SimpleFPGrowth.scala b/examples/src/main/scala/org/apache/spark/examples/mllib/SimpleFPGrowth.scala
new file mode 100644
index 0000000..b4e06af
--- /dev/null
+++ b/examples/src/main/scala/org/apache/spark/examples/mllib/SimpleFPGrowth.scala
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// scalastyle:off println
+package org.apache.spark.examples.mllib
+
+// $example on$
+import org.apache.spark.mllib.fpm.FPGrowth
+import org.apache.spark.rdd.RDD
+// $example off$
+
+import org.apache.spark.{SparkContext, SparkConf}
+
+object SimpleFPGrowth {
+
+  def main(args: Array[String]) {
+
+    val conf = new SparkConf().setAppName("SimpleFPGrowth")
+    val sc = new SparkContext(conf)
+
+    // $example on$
+    val data = sc.textFile("data/mllib/sample_fpgrowth.txt")
+
+    val transactions: RDD[Array[String]] = data.map(s => s.trim.split(' '))
+
+    val fpg = new FPGrowth()
+      .setMinSupport(0.2)
+      .setNumPartitions(10)
+    val model = fpg.run(transactions)
+
+    model.freqItemsets.collect().foreach { itemset =>
+      println(itemset.items.mkString("[", ",", "]") + ", " + itemset.freq)
+    }
+
+    val minConfidence = 0.8
+    model.generateAssociationRules(minConfidence).collect().foreach { rule =>
+      println(
+        rule.antecedent.mkString("[", ",", "]")
+          + " => " + rule.consequent .mkString("[", ",", "]")
+          + ", " + rule.confidence)
+    }
+    // $example off$
+  }
+}
+// scalastyle:on println
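
As a usage note: after building Spark with the examples, the new Scala and Java examples can typically be launched from the Spark root with the run-example script, e.g. `./bin/run-example mllib.SimpleFPGrowth`, and the Python example with `./bin/spark-submit examples/src/main/python/mllib/fpgrowth_example.py`; running from the root keeps the relative path data/mllib/sample_fpgrowth.txt resolvable.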

