[ https://issues.apache.org/jira/browse/HADOOP-17451?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17259055#comment-17259055 ]

Steve Loughran commented on HADOOP-17451:
-----------------------------------------

put byte count == 0 at the assert, even though the statistics dump in the assertion text itself reports object_put_bytes=10485760:
{code}
[ERROR] test_010_CreateHugeFile(org.apache.hadoop.fs.s3a.scale.ITestS3AHugeFilesDiskBlocks)  Time elapsed: 6.746 s  <<< FAILURE!
java.lang.AssertionError: 
[putByteCount count from filesystem stats counters=((files_created=1) 
(stream_write_block_uploads_aborted=0) (committer_commits_reverted=0) 
(action_http_get_request.failures=0) (committer_magic_files_created=0) 
(object_copy_requests=0) (stream_read_close_operations=0) (store_io_retry=0) 
(stream_write_block_uploads_committed=0) 
(committer_stage_file_upload.failures=0) 
(s3guard_metadatastore_authoritative_directories_updated=0) 
(delegation_token_issued=0) (action_http_head_request=0) (op_create=1) 
(stream_read_fully_operations=0) (committer_commits_completed=0) 
(stream_read_seek_policy_changed=0) (committer_commits_created=0) 
(s3guard_metadatastore_put_path_request=2) (op_get_delegation_token=0) 
(stream_write_exceptions=0) (directories_created=1) (files_delete_rejected=0) 
(stream_write_total_data=20971520) (action_http_get_request=0) 
(files_copied_bytes=0) (op_list_located_status=0) 
(object_bulk_delete_request=1) (committer_commits_aborted=0) 
(action_executor_acquired.failures=0) (committer_stage_file_upload=0) 
(action_http_head_request.failures=0) (stream_read_opened=0) (op_list_status=0) 
(stream_write_queue_duration.failures=0) (op_get_file_checksum=0) 
(ignored_errors=1) (committer_bytes_uploaded=0) (op_list_files=0) 
(files_deleted=0) (op_is_directory=0) (s3guard_metadatastore_throttled=0) 
(stream_read_seek_backward_operations=0) (multipart_upload_started=0) 
(stream_write_total_time=6687) (object_delete_request.failures=0) 
(fake_directories_created=0) (stream_read_seek_operations=0) 
(stream_read_seek_forward_operations=0) (object_put_bytes=10485760) 
(op_is_file=0) (store_io_request=0) (committer_commits.failures=0) 
(stream_write_block_uploads=4) (committer_commit_job=0) 
(object_delete_objects=2) (multipart_upload_part_put=0) (op_open=0) 
(s3guard_metadatastore_record_reads=5) (committer_commit_job.failures=0) 
(s3guard_metadatastore_initialization=1) (object_put_request=3) 
(multipart_upload_abort_under_path_invoked=0) 
(stream_read_bytes_backwards_on_seek=0) (multipart_upload_part_put_bytes=0) 
(stream_read_seek_bytes_discarded=0) (multipart_upload_aborted=0) 
(committer_bytes_committed=0) (committer_materialize_file=0) 
(object_metadata_request=0) (s3guard_metadatastore_retry=0) 
(object_put_request_completed=3) (op_create_non_recursive=0) 
(stream_write_queue_duration=2) (committer_jobs_completed=0) 
(multipart_instantiated=0) (stream_read_operations=0) 
(object_bulk_delete_request.failures=0) (fake_directories_deleted=2) 
(stream_aborted=0) (op_rename=0) (object_multipart_aborted=0) 
(op_get_file_status=0) (s3guard_metadatastore_record_deletes=0) 
(stream_read_total_bytes=0) (committer_materialize_file.failures=0) 
(op_glob_status=0) (delegation_token_issued.failures=0) 
(stream_read_exceptions=0) (action_executor_acquired=2) 
(stream_read_version_mismatches=0) (stream_write_bytes=10485760) (op_exists=0) 
(stream_write_exceptions_completing_upload=0) (object_select_requests=0) 
(object_delete_request=0) (object_multipart_initiated=1) 
(committer_jobs_failed=0) (stream_read_operations_incomplete=0) (op_delete=1) 
(stream_read_bytes=0) (object_list_request.failures=0) 
(object_continue_list_request.failures=0) 
(stream_read_bytes_discarded_in_abort=0) (committer_tasks_completed=0) 
(object_list_request=0) (store_io_throttled=0) (files_copied=0) 
(committer_tasks_failed=0) (s3guard_metadatastore_record_writes=4) 
(stream_read_seek_bytes_skipped=0) (multipart_upload_completed=0) 
(object_continue_list_request=0) (op_mkdirs=1) (op_copy_from_local_file=0) 
(stream_read_closed=0) (directories_deleted=0) 
(stream_read_bytes_discarded_in_close=0));
gauges=();
minimums=((delegation_token_issued.failures.min=-1) 
(stream_write_queue_duration.min=-1) (action_executor_acquired.min=0) 
(object_list_request.min=-1) (object_continue_list_request.failures.min=-1) 
(object_list_request.failures.min=-1) 
(stream_write_queue_duration.failures.min=-1) 
(committer_stage_file_upload.min=-1) (committer_materialize_file.min=-1) 
(action_http_head_request.min=-1) (object_bulk_delete_request.failures.min=-1) 
(object_bulk_delete_request.min=92) (object_delete_request.failures.min=-1) 
(action_http_get_request.failures.min=-1) (delegation_token_issued.min=-1) 
(object_continue_list_request.min=-1) (object_delete_request.min=-1) 
(committer_commit_job.min=-1) (committer_commit_job.failures.min=-1) 
(action_http_get_request.min=-1) (committer_materialize_file.failures.min=-1) 
(committer_stage_file_upload.failures.min=-1) 
(action_executor_acquired.failures.min=-1) 
(action_http_head_request.failures.min=-1));
maximums=((committer_materialize_file.failures.max=-1) 
(action_http_head_request.max=-1) (committer_commit_job.max=-1) 
(object_continue_list_request.max=-1) (object_bulk_delete_request.max=92) 
(object_delete_request.failures.max=-1) 
(action_http_get_request.failures.max=-1) (committer_materialize_file.max=-1) 
(delegation_token_issued.max=-1) (action_http_head_request.failures.max=-1) 
(stream_write_queue_duration.max=-1) (object_list_request.failures.max=-1) 
(action_http_get_request.max=-1) (committer_commit_job.failures.max=-1) 
(action_executor_acquired.failures.max=-1) 
(committer_stage_file_upload.failures.max=-1) 
(object_bulk_delete_request.failures.max=-1) (object_delete_request.max=-1) 
(delegation_token_issued.failures.max=-1) (action_executor_acquired.max=1) 
(object_continue_list_request.failures.max=-1) 
(committer_stage_file_upload.max=-1) 
(stream_write_queue_duration.failures.max=-1) (object_list_request.max=-1));
means=((action_http_get_request.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(object_delete_request.mean=(samples=0, sum=0, mean=0.0000)) 
(committer_materialize_file.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(object_continue_list_request.mean=(samples=0, sum=0, mean=0.0000)) 
(action_http_get_request.mean=(samples=0, sum=0, mean=0.0000)) 
(stream_write_queue_duration.mean=(samples=0, sum=0, mean=0.0000)) 
(action_http_head_request.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(action_executor_acquired.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(committer_commit_job.mean=(samples=0, sum=0, mean=0.0000)) 
(committer_stage_file_upload.mean=(samples=0, sum=0, mean=0.0000)) 
(committer_materialize_file.mean=(samples=0, sum=0, mean=0.0000)) 
(action_executor_acquired.mean=(samples=4, sum=2, mean=0.5000)) 
(action_http_head_request.mean=(samples=0, sum=0, mean=0.0000)) 
(committer_commit_job.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(stream_write_queue_duration.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(committer_stage_file_upload.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(object_list_request.mean=(samples=0, sum=0, mean=0.0000)) 
(object_continue_list_request.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(object_delete_request.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(delegation_token_issued.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(object_bulk_delete_request.mean=(samples=1, sum=92, mean=92.0000)) 
(object_bulk_delete_request.failures.mean=(samples=0, sum=0, mean=0.0000)) 
(delegation_token_issued.mean=(samples=0, sum=0, mean=0.0000)) 
(object_list_request.failures.mean=(samples=0, sum=0, mean=0.0000)));
] 
Expecting:
 <0L>
to be greater than:
 <0L> 
        at org.apache.hadoop.fs.s3a.scale.AbstractSTestS3AHugeFiles.test_010_CreateHugeFile(AbstractSTestS3AHugeFiles.java:243)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:50)
        at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
        at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:47)
        at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
        at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
        at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
        at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:55)
        at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:298)
        at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:292)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.lang.Thread.run(Thread.java:748)

{code}
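The failure shape, a described value followed by "Expecting: <0L> to be greater than: <0L>", is AssertJ's describedAs(...).isGreaterThan(0). A minimal sketch of that style of check, assuming the stock FileSystem.getStorageStatistics() API; the counter key and the class/method names are illustrative, not the exact code at AbstractSTestS3AHugeFiles.java:243:

{code}
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageStatistics;

import static org.assertj.core.api.Assertions.assertThat;

public class PutByteCountCheck {

  /**
   * Assert that the FS-level PUT byte counter is non-zero after an upload.
   * "object_put_bytes" is the key visible in the dump above; the lookup
   * the real test performs may differ.
   */
  static void assertBytesWerePut(FileSystem fs) {
    StorageStatistics stats = fs.getStorageStatistics();
    // getLong() returns a Long; a missing key comes back as null here.
    Long putByteCount = stats.getLong("object_put_bytes");
    assertThat(putByteCount)
        .describedAs("putByteCount count from filesystem stats %s", stats)
        .isGreaterThan(0L);
  }
}
{code}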

> intermittent failure of S3A huge file upload tests: count of bytes uploaded 
> == 0
> --------------------------------------------------------------------------------
>
>                 Key: HADOOP-17451
>                 URL: https://issues.apache.org/jira/browse/HADOOP-17451
>             Project: Hadoop Common
>          Issue Type: Sub-task
>          Components: fs/s3
>    Affects Versions: 3.4.0
>            Reporter: Steve Loughran
>            Assignee: Steve Loughran
>            Priority: Major
>
> Intermittent failure of the ITestHuge* upload tests when doing parallel test 
> runs: the count of bytes uploaded, as reported through StorageStatistics, 
> isn't updated. Perhaps the expected counter isn't incremented, and a parallel 
> run with recycled FS instances and a pre-created directory structure surfaces 
> this in a way a single test run doesn't (see the sketch after this quoted 
> description).
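
One way to probe the recycled-instance theory above is to take the filesystem out of the shared FileSystem cache, so each test reads counters from a private instance. This is a sketch, not what the tests currently do; FileSystem.newInstance() and the fs.s3a.impl.disable.cache switch are both standard Hadoop, while the helper name is made up for illustration:

{code}
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class UncachedFs {

  /**
   * FileSystem.get() hands back a cached instance shared per
   * (scheme, authority, user), so parallel tests can observe each
   * other's statistics. newInstance() bypasses that cache and gives
   * the caller a private instance with its own counters.
   */
  static FileSystem newPrivateInstance(URI uri, Configuration conf)
      throws IOException {
    // Equivalent alternative: set fs.s3a.impl.disable.cache=true in conf
    // and call FileSystem.get(uri, conf).
    return FileSystem.newInstance(uri, conf);
  }
}
{code}

The caller has to close() such an instance itself; uncached instances are not cleaned up by FileSystem.closeAll().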


