ivankelly commented on a change in pull request #1746: PIP-17: impl offload() 
for S3ManagedLedgerOffloader
URL: https://github.com/apache/incubator-pulsar/pull/1746#discussion_r187597447
 
 

 ##########
 File path: 
pulsar-broker/src/test/java/org/apache/pulsar/broker/s3offload/S3ManagedLedgerOffloaderTest.java
 ##########
 @@ -111,5 +186,87 @@ public void testNoBucketConfigured() throws Exception {
             // correct
         }
     }
+
+    @Test
+    public void testOffload() throws Exception {
+        int entryLength = 10;
+        int entryNumberEachBlock = 10;
+        ServiceConfiguration conf = new ServiceConfiguration();
+        
conf.setManagedLedgerOffloadDriver(S3ManagedLedgerOffloader.DRIVER_NAME);
+
+        conf.setS3ManagedLedgerOffloadBucket(BUCKET);
+        conf.setS3ManagedLedgerOffloadRegion("eu-west-1");
+        conf.setS3ManagedLedgerOffloadServiceEndpoint(s3endpoint);
+        conf.setS3ManagedLedgerOffloadMaxBlockSizeInBytes(
+            DataBlockHeaderImpl.getDataStartOffset() + (entryLength + 12) * 
entryNumberEachBlock);
+        LedgerOffloader offloader = S3ManagedLedgerOffloader.create(conf, 
scheduler);
+
+        // offload 30 entries, which will be placed into 3 data blocks.
+        int entryCount = 30;
+        ReadHandle readHandle = buildReadHandle(entryCount);
+        UUID uuid = UUID.randomUUID();
+        offloader.offload(readHandle, uuid, new HashMap<>()).get();
+
+        S3Object obj = s3client.getObject(BUCKET, 
S3ManagedLedgerOffloader.dataBlockOffloadKey(readHandle, uuid));
+        S3Object indexObj = s3client.getObject(BUCKET, 
S3ManagedLedgerOffloader.indexBlockOffloadKey(readHandle, uuid));
+
+        verifyS3ObjectRead(obj, indexObj, readHandle, 3, 30, 
conf.getS3ManagedLedgerOffloadMaxBlockSizeInBytes());
+    }
+
+    @Test
+    public void testOffloadFail() throws Exception {
+        int entryLength = 10;
+        int entryNumberEachBlock = 10;
+        int maxBlockSize = DataBlockHeaderImpl.getDataStartOffset() + 
(entryLength + 12) * entryNumberEachBlock;
+
+        // offload 30 entries, which will be placed into 3 data blocks.
+        int entryCount = 30;
+        ReadHandle readHandle = buildReadHandle(entryCount);
+        UUID uuid = UUID.randomUUID();
+
+        // mock throw exception when initiateMultipartUpload
+        try {
+            AmazonS3 mockS3client = Mockito.spy(s3client);
+            
Mockito.when(mockS3client.initiateMultipartUpload(any())).thenThrow(AmazonServiceException.class);
+            LedgerOffloader offloader = new 
S3ManagedLedgerOffloader(mockS3client, BUCKET, scheduler, maxBlockSize);
+            offloader.offload(readHandle, uuid, new HashMap<>()).get();
+            fail("Should throw exception when initiateMultipartUpload");
+        } catch (Exception e) {
+            // expected
 
 Review comment:
   Check that the exception thrown is the one we expect to be thrown (i.e. the 
AmazonServiceException)
   Also verify that neither object exists in S3. Do the same for the other 
tests.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to