[ https://issues.apache.org/jira/browse/HADOOP-14596?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16066672#comment-16066672 ]

Steve Loughran commented on HADOOP-14596:
-----------------------------------------

Testing: S3 Ireland with SSE-KMS enabled. One transient failure, which has been
surfacing intermittently for a few days; it goes away on retries.

{code}
testRecursiveRootListing(org.apache.hadoop.fs.contract.s3a.ITestS3AContractRootDir)  Time elapsed: 6.342 sec  <<< ERROR!
org.apache.hadoop.fs.s3a.AWSS3IOException: getFileStatus on user/stevel/: com.amazonaws.services.s3.model.AmazonS3Exception: Bad Request (Service: Amazon S3; Status Code: 400; Error Code: 400 Bad Request; Request ID: 7E712B0C2E79FF7D), S3 Extended Request ID: XgJeRFL1h2Q5PnAU6BhsjVt7kV2UBIumfnG5p0UihS6si9h9D5sYaw7Kt543FKxbAvbtQGGUiV0=: Bad Request (Service: Amazon S3; Status Code: 400; Error Code: 400 Bad Request; Request ID: 7E712B0C2E79FF7D)
        at org.apache.hadoop.fs.s3a.S3AUtils.translateException(S3AUtils.java:194)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.getFileStatus(S3AFileSystem.java:1653)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.innerListStatus(S3AFileSystem.java:1450)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.listStatus(S3AFileSystem.java:1426)
        at org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk(ContractTestUtils.java:1211)
        at org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk(ContractTestUtils.java:1218)
        at org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk(ContractTestUtils.java:1218)
        at org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest.testRecursiveRootListing(AbstractContractRootDirectoryTest.java:221)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
Caused by: com.amazonaws.services.s3.model.AmazonS3Exception: Bad Request (Service: Amazon S3; Status Code: 400; Error Code: 400 Bad Request; Request ID: 7E712B0C2E79FF7D)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1588)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1258)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1030)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:742)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:716)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
        at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
        at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
        at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4221)
        at com.amazonaws.services.s3.AmazonS3Client.invoke(AmazonS3Client.java:4168)
        at com.amazonaws.services.s3.AmazonS3Client.getObjectMetadata(AmazonS3Client.java:1249)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.getObjectMetadata(S3AFileSystem.java:941)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.getFileStatus(S3AFileSystem.java:1636)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.innerListStatus(S3AFileSystem.java:1450)
        at org.apache.hadoop.fs.s3a.S3AFileSystem.listStatus(S3AFileSystem.java:1426)
        at org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk(ContractTestUtils.java:1211)
        at org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk(ContractTestUtils.java:1218)
        at org.apache.hadoop.fs.contract.ContractTestUtils.treeWalk(ContractTestUtils.java:1218)
        at org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest.testRecursiveRootListing(AbstractContractRootDirectoryTest.java:221)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:47)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:44)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
        at org.junit.internal.runners.statements.FailOnTimeout$StatementThread.run(FailOnTimeout.java:74)
{code}
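
For anyone trying to reproduce this, here is a minimal sketch, not from the test suite itself, of the client-side SSE-KMS setup being exercised. The bucket name, path and KMS key ARN are placeholders; the fs.s3a.server-side-encryption* options are the ones documented for S3A's SSE-KMS support.

{code}
// Minimal sketch of an SSE-KMS-enabled S3A client; bucket, path and
// key ARN below are placeholders, not the real test values.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SseKmsGetFileStatus {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Send SSE-KMS headers on writes; reading KMS-encrypted objects needs
    // no extra client config, only kms:Decrypt permission on the key.
    conf.set("fs.s3a.server-side-encryption-algorithm", "SSE-KMS");
    // Optional: pin a specific KMS key; if unset, the default aws/s3 key
    // is used. Placeholder ARN:
    conf.set("fs.s3a.server-side-encryption.key",
        "arn:aws:kms:eu-west-1:000000000000:key/example-key-id");

    FileSystem fs = FileSystem.get(URI.create("s3a://example-bucket/"), conf);
    // getFileStatus() issues the HEAD request that failed with the
    // transient 400 in the trace above.
    System.out.println(fs.getFileStatus(new Path("/user/stevel/")));
  }
}
{code}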

> AWS SDK 1.11+ aborts() on close() if > 0 bytes in stream; logs error
> --------------------------------------------------------------------
>
>                 Key: HADOOP-14596
>                 URL: https://issues.apache.org/jira/browse/HADOOP-14596
>             Project: Hadoop Common
>          Issue Type: Sub-task
>          Components: fs/s3
>            Reporter: Steve Loughran
>            Assignee: Steve Loughran
>            Priority: Minor
>         Attachments: HADOOP-14596-001.patch, HADOOP-14596-002.patch, testlog.txt
>
>
> The latest SDK now tells us off when we do a seek(): closing the stream with unread bytes aborts the TCP connection, and the SDK logs a warning every time
> {code}
> - Not all bytes were read from the S3ObjectInputStream, aborting HTTP connection. This is likely an error and may result in sub-optimal behavior. Request only the bytes you need via a ranged GET or drain the input stream after use.
> 2017-06-27 15:47:35,789 [ScalaTest-main-running-S3ACSVReadSuite] WARN  internal.S3AbortableInputStream (S3AbortableInputStream.java:close(163)) - Not all bytes were read from the S3ObjectInputStream, aborting HTTP connection. This is likely an error and may result in sub-optimal behavior. Request only the bytes you need via a ranged GET or drain the input stream after use.
> 2017-06-27 15:47:37,409 [ScalaTest-main-running-S3ACSVReadSuite] WARN  internal.S3AbortableInputStream (S3AbortableInputStream.java:close(163)) - Not all bytes were read from the S3ObjectInputStream, aborting HTTP connection. This is likely an error and may result in sub-optimal behavior. Request only the bytes you need via a ranged GET or drain the input stream after use.
> 2017-06-27 15:47:39,003 [ScalaTest-main-running-S3ACSVReadSuite] WARN  internal.S3AbortableInputStream (S3AbortableInputStream.java:close(163)) - Not all bytes were read from the S3ObjectInputStream, aborting HTTP connection. This is likely an error and may result in sub-optimal behavior. Request only the bytes you need via a ranged GET or drain the input stream after use.
> 2017-06-27 15:47:40,627 [ScalaTest-main-running-S3ACSVReadSuite] WARN  internal.S3AbortableInputStream (S3AbortableInputStream.java:close(163)) - Not all bytes were read from the S3ObjectInputStream, aborting HTTP connection. This is likely an error and may result in sub-optimal behavior. Request only the bytes you need via a ranged GET or drain the input stream after use.
> {code}
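
For context, a minimal sketch against the raw 1.11 SDK (not the attached patches) of the two exits the warning recommends. Bucket and key names are placeholders, and the assumption that an explicit abort() avoids the implicit-abort warning in close() is just that, an assumption about S3AbortableInputStream's behaviour.

{code}
// Sketch of the two ways to finish with an S3ObjectInputStream that the
// warning above recommends. "example-bucket"/"example-key" are placeholders.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;

public class DrainOrAbort {
  public static void main(String[] args) throws Exception {
    AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();

    // Option 1: ranged GET. Ask only for the bytes needed and drain them,
    // so close() finds nothing unread and the connection can be reused.
    S3Object ranged = s3.getObject(
        new GetObjectRequest("example-bucket", "example-key")
            .withRange(0, 1023));
    try (S3ObjectInputStream in = ranged.getObjectContent()) {
      byte[] buf = new byte[1024];
      while (in.read(buf) != -1) {
        // consume the whole requested range
      }
    }

    // Option 2: explicit abort. When the rest of the stream is useless
    // (e.g. after a backwards seek), abort() deliberately rather than
    // letting close() abort implicitly and log "Not all bytes were read".
    S3Object whole = s3.getObject("example-bucket", "example-key");
    S3ObjectInputStream in = whole.getObjectContent();
    in.read(new byte[512]);   // read a little, then give up on the rest
    in.abort();               // drop the TCP connection on purpose
  }
}
{code}

The tradeoff between the two: draining is cheap when only a few bytes remain, while aborting avoids reading the tail of a large object just to recycle a connection.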


