Thank you for your suggestion, Shaofeng Shi. I tried Hadoop client 2.7.3 and it worked. But then I ran into another problem:
--------------------------------------------------
17/06/21 20:20:39 WARN TaskSetManager: Lost task 0.0 in stage 0.0 (TID 0, hadoop645.lt.163.org): java.lang.IllegalArgumentException: Failed to find metadata store by url: kylin_metadata_2_0_0@hbase
        at org.apache.kylin.common.persistence.ResourceStore.createResourceStore(ResourceStore.java:99)
        at org.apache.kylin.common.persistence.ResourceStore.getStore(ResourceStore.java:110)
        at org.apache.kylin.cube.CubeDescManager.getStore(CubeDescManager.java:370)
        at org.apache.kylin.cube.CubeDescManager.reloadAllCubeDesc(CubeDescManager.java:298)
        at org.apache.kylin.cube.CubeDescManager.<init>(CubeDescManager.java:109)
        at org.apache.kylin.cube.CubeDescManager.getInstance(CubeDescManager.java:81)
        at org.apache.kylin.cube.CubeInstance.getDescriptor(CubeInstance.java:114)
        at org.apache.kylin.cube.CubeSegment.getCubeDesc(CubeSegment.java:119)
        at org.apache.kylin.cube.kv.RowKeyEncoder.<init>(RowKeyEncoder.java:50)
        at org.apache.kylin.cube.kv.AbstractRowKeyEncoder.createInstance(AbstractRowKeyEncoder.java:48)
        at org.apache.kylin.engine.spark.SparkCubingByLayer$2.call(SparkCubingByLayer.java:205)
        at org.apache.kylin.engine.spark.SparkCubingByLayer$2.call(SparkCubingByLayer.java:193)
        at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1018)
        at org.apache.spark.api.java.JavaPairRDD$$anonfun$pairFunToScalaFun$1.apply(JavaPairRDD.scala:1018)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at org.apache.spark.util.collection.ExternalSorter.insertAll(ExternalSorter.scala:191)
        at org.apache.spark.shuffle.sort.SortShuffleWriter.write(SortShuffleWriter.scala:64)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
        at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
        at org.apache.spark.scheduler.Task.run(Task.scala:89)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)
17/06/21 20:20:39 INFO TaskSetManager: Starting task 0.1 in stage 0.0 (TID 2, hadoop645.lt.163.org, partition 0, RACK_LOCAL, 3276 bytes)
17/06/21 20:21:14 WARN TaskSetManager: Lost task 1.0 in stage 0.0 (TID 1, hadoop645.lt.163.org): java.lang.IllegalArgumentException: Failed to find metadata store by url: kylin_metadata_2_0_0@hbase
        ... (stack trace identical to the one above)
--------------------------------------------------
But I can use Kylin's built-in spark-shell to read data from Hive and HBase successfully, like this:
--------------------------------------------------
sqlContext.sql("show tables").take(1)
--------------------------------------------------
import org.apache.spark._
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Result
import org.apache.hadoop.hbase.mapreduce.TableInputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable

// point the scan at the HBase table via ZooKeeper
val conf = HBaseConfiguration.create()
conf.set("hbase.zookeeper.quorum", "localhost")
conf.set(TableInputFormat.INPUT_TABLE, "test_table")

// read the table as an RDD of (rowkey, Result) pairs
val hBaseRDD = sc.newAPIHadoopRDD(conf, classOf[TableInputFormat],
  classOf[ImmutableBytesWritable], classOf[Result])

// take the first row and print every cell in it
val res = hBaseRDD.take(1)
val rs = res(0)._2
val kv = rs.raw
for (keyvalue <- kv)
  println("rowkey:" + new String(keyvalue.getRow) +
    " cf:" + new String(keyvalue.getFamily) +
    " column:" + new String(keyvalue.getQualifier) +
    " value:" + new String(keyvalue.getValue))
--------------------------------------------------
By the way, I've already put hive-site.xml and hbase-site.xml into HADOOP_CONF_DIR and $SPARK_HOME/conf (which is actually $KYLIN_HOME/spark/conf), and I also set spark.driver.extraClassPath in spark-defaults.conf to attach some related jars (hbase-client.jar, hbase-common.jar, and so on).
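For reference, here is a minimal sketch of those spark-defaults.conf entries (the /path/to jar locations are placeholders, not my exact paths):
--------------------------------------------------
# sketch of spark-defaults.conf; jar paths are placeholders
spark.driver.extraClassPath=/path/to/hbase-client.jar:/path/to/hbase-common.jar
# the lost tasks above ran on executors, so maybe the executors need
# the same jars as well? e.g.:
# spark.executor.extraClassPath=/path/to/hbase-client.jar:/path/to/hbase-common.jar
--------------------------------------------------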
I don't know why this happens. Could anyone give me some advice?
2017-06-21
skyyws
From: ShaoFeng Shi <[email protected]>
Sent: 2017-06-20 15:13
Subject: Re: Re: Build sample error with spark on kylin 2.0.0
To: "dev" <[email protected]>
Cc:
Or you can check whether there are old hadoop jars on your cluster,
according to https://issues.apache.org/jira/browse/HADOOP-11064
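For example, in Kylin's spark-shell you can print which jar a Hadoop class is actually loaded from; a quick sketch, using the class named in your UnsatisfiedLinkError:
--------------------------------------------------
// Print the jar that provides NativeCrc32; a hadoop-common jar older than
// the cluster's native libraries would explain the missing native method.
val c = Class.forName("org.apache.hadoop.util.NativeCrc32")
println(c.getProtectionDomain.getCodeSource.getLocation)
--------------------------------------------------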
2017-06-20 9:33 GMT+08:00 skyyws <[email protected]>:
> No, I deploy Kylin on Linux. This is my machine info:
> --------------------------
> 3.2.0-4-amd64 #1 SMP Debian 3.2.82-1 x86_64 GNU/Linux
> --------------------------
>
> 2017-06-20
>
> skyyws
>
>
>
> From: ShaoFeng Shi <[email protected]>
> Sent: 2017-06-20 00:10
> Subject: Re: Build sample error with spark on kylin 2.0.0
> To: "dev" <[email protected]>
> Cc:
>
> Are you running Kylin on Windows? If yes, check:
> https://stackoverflow.com/questions/33211599/hadoop-error-on-windows-java-lang-unsatisfiedlinkerror
>
> 2017-06-19 21:55 GMT+08:00 skyyws <[email protected]>:
>
> > Hi all,
> > I met an error when using the Spark engine to build the Kylin sample, on
> > the step "Build Cube with Spark". Here is the exception log:
> > ------------------------------------------------------------
> > Exception in thread "main" java.lang.UnsatisfiedLinkError: org.apache.hadoop.util.NativeCrc32.nativeComputeChunkedSumsByteArray(II[BI[BIILjava/lang/String;JZ)V
> >         at org.apache.hadoop.util.NativeCrc32.nativeComputeChunkedSumsByteArray(Native Method)
> >         at org.apache.hadoop.util.NativeCrc32.calculateChunkedSumsByteArray(NativeCrc32.java:86)
> >         at org.apache.hadoop.util.DataChecksum.calculateChunkedSums(DataChecksum.java:430)
> >         at org.apache.hadoop.fs.FSOutputSummer.writeChecksumChunks(FSOutputSummer.java:202)
> >         at org.apache.hadoop.fs.FSOutputSummer.write1(FSOutputSummer.java:124)
> >         at org.apache.hadoop.fs.FSOutputSummer.write(FSOutputSummer.java:110)
> >         at org.apache.hadoop.fs.FSDataOutputStream$PositionCache.write(FSDataOutputStream.java:58)
> >         at java.io.DataOutputStream.write(DataOutputStream.java:107)
> >         at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:80)
> >         at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:52)
> >         at org.apache.hadoop.io.IOUtils.copyBytes(IOUtils.java:112)
> >         at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:366)
> >         at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:338)
> >         at org.apache.hadoop.fs.FileUtil.copy(FileUtil.java:289)
> >         at org.apache.spark.deploy.yarn.Client.copyFileToRemote(Client.scala:317)
> >         at org.apache.spark.deploy.yarn.Client.org$apache$spark$deploy$yarn$Client$$distribute$1(Client.scala:407)
> >         at org.apache.spark.deploy.yarn.Client$$anonfun$prepareLocalResources$5.apply(Client.scala:446)
> >         at org.apache.spark.deploy.yarn.Client$$anonfun$prepareLocalResources$5.apply(Client.scala:444)
> >         at scala.collection.immutable.List.foreach(List.scala:318)
> >         at org.apache.spark.deploy.yarn.Client.prepareLocalResources(Client.scala:444)
> >         at org.apache.spark.deploy.yarn.Client.createContainerLaunchContext(Client.scala:727)
> >         at org.apache.spark.deploy.yarn.Client.submitApplication(Client.scala:142)
> >         at org.apache.spark.scheduler.cluster.YarnClientSchedulerBackend.start(YarnClientSchedulerBackend.scala:57)
> >         at org.apache.spark.scheduler.TaskSchedulerImpl.start(TaskSchedulerImpl.scala:144)
> >         at org.apache.spark.SparkContext.<init>(SparkContext.scala:530)
> >         at org.apache.spark.api.java.JavaSparkContext.<init>(JavaSparkContext.scala:59)
> >         at org.apache.kylin.engine.spark.SparkCubingByLayer.execute(SparkCubingByLayer.java:150)
> >         at org.apache.kylin.common.util.AbstractApplication.execute(AbstractApplication.java:37)
> >         at org.apache.kylin.common.util.SparkEntry.main(SparkEntry.java:44)
> >         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> >         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
> >         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> >         at java.lang.reflect.Method.invoke(Method.java:606)
> >         at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:731)
> >         at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:181)
> >         at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:206)
> >         at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:121)
> >         at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
> > 17/06/19 21:22:06 INFO storage.DiskBlockManager: Shutdown hook called
> > 17/06/19 21:22:06 INFO util.ShutdownHookManager: Shutdown hook called
> > 17/06/19 21:22:06 INFO util.ShutdownHookManager: Deleting directory /tmp/spark-0d1d3709-86cd-446c-b728-5070f168de28
> > 17/06/19 21:22:06 INFO util.ShutdownHookManager: Deleting directory /tmp/spark-0d1d3709-86cd-446c-b728-5070f168de28/httpd-9bcb9a5d-569f-4f28-ad89-038a9020eda8
> > 17/06/19 21:22:06 INFO util.ShutdownHookManager: Deleting directory /tmp/spark-0d1d3709-86cd-446c-b728-5070f168de28/userFiles-2e9ff265-3d37-40e0-8894-6fd4d1a3ad8b
> >
> >         at org.apache.kylin.common.util.CliCommandExecutor.execute(CliCommandExecutor.java:92)
> >         at org.apache.kylin.engine.spark.SparkExecutable.doWork(SparkExecutable.java:124)
> >         at org.apache.kylin.job.execution.AbstractExecutable.execute(AbstractExecutable.java:124)
> >         at org.apache.kylin.job.execution.DefaultChainedExecutable.doWork(DefaultChainedExecutable.java:64)
> >         at org.apache.kylin.job.execution.AbstractExecutable.execute(AbstractExecutable.java:124)
> >         at org.apache.kylin.job.impl.threadpool.DefaultScheduler$JobRunner.run(DefaultScheduler.java:142)
> >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
> >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
> >         at java.lang.Thread.run(Thread.java:745)
> >
> > ------------------------------------------------------------
> > I can use Kylin's built-in spark-shell to do some operations, like:
> > ------------------------------------------------------------
> > var textFile = sc.textFile("hdfs://xxxx/xxxx/README.md")
> > textFile.count()
> > textFile.first()
> > textFile.filter(line => line.contains("hello")).count()
> > ------------------------------------------------------------
> > Here is the env info:
> > kylin version is 2.0.0
> > hadoop version is 2.7.*
> > spark version is 1.6.*
> > ------------------------------------------------------------
> > Can anyone help me? Thanks.
> >
> >
> > 2017-06-19
> > skyyws
>
>
>
>
> --
> Best regards,
>
> Shaofeng Shi 史少锋
>
--
Best regards,
Shaofeng Shi 史少锋