[ 
https://issues.apache.org/jira/browse/SPARK-21138?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

sharkd tu updated SPARK-21138:
------------------------------
    Description: 
When I set "spark.hadoop.fs.defaultFS" and "spark.yarn.stagingDir" to different clusters, as follows:

{code:java}
spark.hadoop.fs.defaultFS  hdfs://tl-nn-tdw.tencent-distribute.com:54310
spark.yarn.stagingDir hdfs://ss-teg-2-v2/tmp/spark
{code}

I got the following logs:

{code:java}
17/06/19 17:55:48 INFO SparkContext: Successfully stopped SparkContext
17/06/19 17:55:48 INFO ApplicationMaster: Unregistering ApplicationMaster with SUCCEEDED
17/06/19 17:55:48 INFO AMRMClientImpl: Waiting for application to be successfully unregistered.
17/06/19 17:55:48 INFO ApplicationMaster: Deleting staging directory hdfs://ss-teg-2-v2/tmp/spark/.sparkStaging/application_1496819138021_77618
17/06/19 17:55:48 ERROR Utils: Uncaught exception in thread Thread-2
java.lang.IllegalArgumentException: Wrong FS: hdfs://ss-teg-2-v2/tmp/spark/.sparkStaging/application_1496819138021_77618, expected: hdfs://tl-nn-tdw.tencent-distribute.com:54310
        at org.apache.hadoop.fs.FileSystem.checkPath(FileSystem.java:642)
        at org.apache.hadoopcdh3.hdfs.Cdh3DistributedFileSystem.getPathName(Cdh3DistributedFileSystem.java:197)
        at org.apache.hadoopcdh3.hdfs.Cdh3DistributedFileSystem.access$000(Cdh3DistributedFileSystem.java:81)
        at org.apache.hadoopcdh3.hdfs.Cdh3DistributedFileSystem$10.doCall(Cdh3DistributedFileSystem.java:644)
        at org.apache.hadoopcdh3.hdfs.Cdh3DistributedFileSystem$10.doCall(Cdh3DistributedFileSystem.java:640)
        at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81)
        at org.apache.hadoopcdh3.hdfs.Cdh3DistributedFileSystem.delete(Cdh3DistributedFileSystem.java:640)
        at org.apache.hadoop.fs.FilterFileSystem.delete(FilterFileSystem.java:216)
        at org.apache.spark.deploy.yarn.ApplicationMaster.org$apache$spark$deploy$yarn$ApplicationMaster$$cleanupStagingDir(ApplicationMaster.scala:545)
        at org.apache.spark.deploy.yarn.ApplicationMaster$$anonfun$run$1.apply$mcV$sp(ApplicationMaster.scala:233)
        at org.apache.spark.util.SparkShutdownHook.run(ShutdownHookManager.scala:216)
        at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply$mcV$sp(ShutdownHookManager.scala:188)
        at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply(ShutdownHookManager.scala:188)
        at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1$$anonfun$apply$mcV$sp$1.apply(ShutdownHookManager.scala:188)
        at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:1951)
        at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply$mcV$sp(ShutdownHookManager.scala:188)
        at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply(ShutdownHookManager.scala:188)
        at org.apache.spark.util.SparkShutdownHookManager$$anonfun$runAll$1.apply(ShutdownHookManager.scala:188)
        at scala.util.Try$.apply(Try.scala:192)
        at org.apache.spark.util.SparkShutdownHookManager.runAll(ShutdownHookManager.scala:188)
        at org.apache.spark.util.SparkShutdownHookManager$$anon$2.run(ShutdownHookManager.scala:178)
        at org.apache.hadoop.util.ShutdownHookManager$1.run(ShutdownHookManager.java:54)
{code}
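
The "Wrong FS" error is thrown by FileSystem.checkPath because the staging directory on the second cluster gets resolved against the default FileSystem of the first one. The sketch below (spark-shell style, illustrative only, not the actual ApplicationMaster code; the application id is made up) shows the failing resolution and the path-based resolution that would target the right cluster:

{code:scala}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

val conf = new Configuration()
conf.set("fs.defaultFS", "hdfs://tl-nn-tdw.tencent-distribute.com:54310")

// The staging dir lives on a different cluster than fs.defaultFS.
val stagingDir = new Path("hdfs://ss-teg-2-v2/tmp/spark/.sparkStaging/application_123")

// Resolving the path against the default FileSystem fails checkPath():
//   java.lang.IllegalArgumentException: Wrong FS: hdfs://ss-teg-2-v2/...,
//   expected: hdfs://tl-nn-tdw.tencent-distribute.com:54310
// FileSystem.get(conf).delete(stagingDir, true)

// Asking the path for its own FileSystem targets the staging cluster instead:
stagingDir.getFileSystem(conf).delete(stagingDir, true)
{code}

If cleanupStagingDir resolves the FileSystem from the staging path rather than from the default configuration, the delete should succeed even when the two settings point at different clusters.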



> Cannot delete staging dir when the clusters of "spark.yarn.stagingDir" and 
> "spark.hadoop.fs.defaultFS" are different 
> ---------------------------------------------------------------------------------------------------------------------
>
>                 Key: SPARK-21138
>                 URL: https://issues.apache.org/jira/browse/SPARK-21138
>             Project: Spark
>          Issue Type: Bug
>          Components: YARN
>    Affects Versions: 2.1.1
>            Reporter: sharkd tu
>



--
This message was sent by Atlassian JIRA
(v6.4.14#64029)
