I am using PySpark from an IPython notebook.

<pre>
data = sc.parallelize(range(1000), 10)

# this succeeds
data.map(lambda x: x + 1).collect()

# this fails with the error below
data.count()
</pre>
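
For what it's worth, the traceback below shows that count() is implemented as mapPartitions plus sum plus reduce, so an equivalent minimal reproduction would be something like the following (a sketch pieced together from the rdd.py frames in the traceback; I have not verified that it fails in exactly the same way):

<pre>
import operator

# the same pipeline count() runs internally, per pyspark/rdd.py:
# count() -> sum() -> reduce(operator.add)
data.mapPartitions(lambda i: [sum(1 for _ in i)]) \
    .mapPartitions(lambda x: [sum(x)]) \
    .reduce(operator.add)
</pre>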


I found a similar report here:
http://apache-spark-user-list.1001560.n3.nabble.com/Exception-on-simple-pyspark-script-td3415.html
but it does not explain how to solve the problem. Can anyone help?
<pre>
---------------------------------------------------------------------------
Py4JJavaError                             Traceback (most recent call last)
<ipython-input-10-0106b6ff8a89> in <module>()
----> 1 data.count()

/home/workspace/spark-1.0.2-bin-hadoop2/python/pyspark/rdd.pyc in count(self)
    735         3
    736         """
--> 737         return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
    738 
    739     def stats(self):

/home/workspace/spark-1.0.2-bin-hadoop2/python/pyspark/rdd.pyc in sum(self)
    726         6.0
    727         """
--> 728         return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
    729 
    730     def count(self):

/home/workspace/spark-1.0.2-bin-hadoop2/python/pyspark/rdd.pyc in reduce(self, f)
    646             if acc is not None:
    647                 yield acc
--> 648         vals = self.mapPartitions(func).collect()
    649         return reduce(f, vals)
    650 

/home/workspace/spark-1.0.2-bin-hadoop2/python/pyspark/rdd.pyc in collect(self)
    610         """
    611         with _JavaStackTrace(self.context) as st:
--> 612           bytesInJava = self._jrdd.collect().iterator()
    613         return list(self._collect_iterator_through_file(bytesInJava))
    614 

/home/workspace/spark-1.0.2-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py in __call__(self, *args)
    535         answer = self.gateway_client.send_command(command)
    536         return_value = get_return_value(answer, self.gateway_client,
--> 537                 self.target_id, self.name)
    538 
    539         for temp_arg in temp_args:

/home/workspace/spark-1.0.2-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
    298                 raise Py4JJavaError(
    299                     'An error occurred while calling {0}{1}{2}.\n'.
--> 300                     format(target_id, '.', name), value)
    301             else:
    302                 raise Py4JError(

----------------------------
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/home/workspace/spark-0.9.1-bin-hadoop2/python/pyspark/worker.py", line 77, in main
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/home/workspace/spark-0.9.1-bin-hadoop2/python/pyspark/serializers.py", line 182, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/home/workspace/spark-0.9.1-bin-hadoop2/python/pyspark/serializers.py", line 117, in dump_stream
    for obj in iterator:
  File "/home/workspace/spark-0.9.1-bin-hadoop2/python/pyspark/serializers.py", line 171, in _batched
    for item in iterator:
  File "/home/workspace/spark-1.0.2-bin-hadoop2/python/pyspark/rdd.py", line 642, in func
TypeError: an integer is required

        org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:115)
        org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:145)
        org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:78)
        org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
        org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
        org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:111)
        org.apache.spark.scheduler.Task.run(Task.scala:51)
        org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:183)
        java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1110)
        java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:603)
        java.lang.Thread.run(Thread.java:722)
Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1049)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1033)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1031)
        at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1031)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:635)
        at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:635)
        at scala.Option.foreach(Option.scala:236)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:635)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1234)
        at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
        at akka.actor.ActorCell.invoke(ActorCell.scala:456)
        at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
        at akka.dispatch.Mailbox.run(Mailbox.scala:219)
        at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
        at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
        at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
        at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
        at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
</pre>
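
One detail I notice in the traceback: the worker frames point at /home/workspace/spark-0.9.1-bin-hadoop2/... while the driver frames point at /home/workspace/spark-1.0.2-bin-hadoop2/..., so the executors may be picking up an older PySpark than the driver, though I am not sure whether that is related. As a rough sketch (assuming the same sc and data as above), this is how I plan to compare what the driver and the workers actually see:

<pre>
import os
import sys

# driver-side environment
print(sys.version)
print(os.environ.get("SPARK_HOME"))

# worker-side environment: map() still works, so use it to report back
# the Python version and SPARK_HOME that each executor sees
data.map(lambda _: (sys.version, os.environ.get("SPARK_HOME"))).distinct().collect()
</pre>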


