>>>> at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
>>>> at org.apache.spark.streaming.dstream.ForEachDStream$$anonfun$1$$anonfun$apply$mcV$sp$1.apply(ForEachDStream.scala:50)
>>>> ...
>>>> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>>>> at java.lang.Thread.run(Thread.java:745)
>>>>
>>>> 16/05/25 11:30:30 INFO BlockManager: Removing RDD 105
>>>>
>>> This is the code that raises the exception:
>>>
>>>   ...foreachRDD(_.foreachPartition(it =>
>>>     recommender.predictWithALS(it.toSeq)))
>>>
>>>         println("<---POSSIBLE SOLUTION--->")
>>>       }
>>>     }
>>>   )
>>> } catch {
>>>   case ...
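>>>
>>> For reference, a cleaned-up sketch of the shape of that block (names like
>>> "messages" and the String element type are simplifications, not my exact
>>> code): foreachRDD runs on the driver once per batch, while the
>>> foreachPartition closure is serialized and shipped to the executors, so
>>> everything it captures has to be serializable.
>>>
>>> import org.apache.spark.streaming.dstream.DStream
>>>
>>> // stand-in for my recommender; the real one wraps the ALS model
>>> trait Recommender extends Serializable {
>>>   def predictWithALS(ratings: Seq[String]): Unit
>>> }
>>>
>>> def score(messages: DStream[String], recommender: Recommender): Unit = {
>>>   messages.foreachRDD { rdd =>       // driver side, once per batch
>>>     rdd.foreachPartition { it =>     // executor side, once per partition
>>>       val batch = it.toSeq           // materialize the partition
>>>       if (batch.nonEmpty) recommender.predictWithALS(batch)
>>>     }
>>>   }
>>> }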
>> val endAls = DateTime.now
>> val result =
>> recommendations.sortBy(-_.rating).take(NumRecommendations).map(toAmazonRating)
>> val alsTime = Seconds.secondsBetween(startAls, endAls).getSeconds
>>
>> println(s"ALS Time: $alsTime seconds")
>> result
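>>
>> As an aside, if "recommendations" is an RDD at this point (the
>> map(toAmazonRating) suggests its elements are MLlib Rating objects), I
>> guess top() would avoid the cluster-wide sort; just a sketch:
>>
>> import org.apache.spark.mllib.recommendation.Rating
>>
>> // top(n) keeps only n candidates per partition instead of sorting the
>> // whole RDD and then calling take(n)
>> val result = recommendations
>>   .top(NumRecommendations)(Ordering.by((r: Rating) => r.rating))
>>   .map(toAmazonRating)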
> AmazonPageParser.parse(productId, userId, rating).onSuccess { case amazonRating =>
>   // Is this the correct way? The best performance? Possibly not; what
>   // about using Avro or Parquet?
>   producer.send(Json.toJson(amazonRating).toString)
>   //producer.send(amazonRating)
> }
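>
> To make the Avro question concrete, this is the kind of thing I mean (a
> sketch only: the schema and field names are made up to mirror my
> AmazonRating, and the producer would need a byte-array serializer):
>
> import java.io.ByteArrayOutputStream
> import org.apache.avro.Schema
> import org.apache.avro.generic.{GenericData, GenericDatumWriter, GenericRecord}
> import org.apache.avro.io.EncoderFactory
>
> val ratingSchema: Schema = new Schema.Parser().parse(
>   """{"type": "record", "name": "AmazonRating", "fields": [
>        {"name": "userId",    "type": "string"},
>        {"name": "productId", "type": "string"},
>        {"name": "rating",    "type": "double"}]}""")
>
> def toAvroBytes(userId: String, productId: String, rating: Double): Array[Byte] = {
>   val record: GenericRecord = new GenericData.Record(ratingSchema)
>   record.put("userId", userId)
>   record.put("productId", productId)
>   record.put("rating", rating)
>   val out = new ByteArrayOutputStream()
>   val encoder = EncoderFactory.get().binaryEncoder(out, null)
>   new GenericDatumWriter[GenericRecord](ratingSchema).write(record, encoder)
>   encoder.flush()
>   out.toByteArray // a binary payload instead of the JSON string above
> }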
I have written a Stack Overflow post
<http://stackoverflow.com/questions/37303202/about-an-error-accessing-a-field-inside-tuple2>
with more details. Please help, I am stuck with this issue and I don't know
how to continue.

Regards

Alonso Isidoro Roman
about.me/alonso.isidoro.roman
Hi, I am receiving this exception when the direct Spark Streaming process
tries to pull data from a Kafka topic:
16/05/25 11:30:30 INFO CheckpointWriter: Checkpoint for time 146416863 ms
saved to file
'file:/Users/aironman/my-recommendation-spark-engine/checkpoint/checkpoint-146416863',
took ...
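
In case it helps, the streaming context is set up roughly like this (a
sketch: the topic name, broker list and batch interval are placeholders,
not my exact values):

import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.KafkaUtils

val conf = new SparkConf().setAppName("my-recommendation-spark-engine")
val ssc = new StreamingContext(conf, Seconds(2)) // placeholder batch interval

// same checkpoint directory as in the CheckpointWriter log lines above
ssc.checkpoint("/Users/aironman/my-recommendation-spark-engine/checkpoint")

val kafkaParams = Map("metadata.broker.list" -> "localhost:9092") // placeholder
val topics = Set("amazonRatingsTopic")                            // placeholder
val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](
  ssc, kafkaParams, topics)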