[ 
https://issues.apache.org/jira/browse/HIVE-27019?focusedWorklogId=845774&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-845774
 ]

ASF GitHub Bot logged work on HIVE-27019:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 16/Feb/23 06:09
            Start Date: 16/Feb/23 06:09
    Worklog Time Spent: 10m 
      Work Description: SourabhBadhya commented on code in PR #4032:
URL: https://github.com/apache/hive/pull/4032#discussion_r1108051660


##########
ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestHandler.java:
##########
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.txn.compactor.handler;
+
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.ql.txn.compactor.Cleaner;
+import org.apache.hadoop.hive.ql.txn.compactor.CleaningRequest;
+import org.apache.hadoop.hive.ql.txn.compactor.TestCleaner;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static 
org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_COMPACTOR_DELAYED_CLEANUP_ENABLED;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.nullable;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+
+public class TestHandler extends TestCleaner {
+
+  @Test
+  public void testCompactionHandlerForSuccessfulCompaction() throws Exception {
+    Table t = newTable("default", "handler_success_table", true);
+    Partition p = newPartition(t, "today");
+    addBaseFile(t, p, 20L, 20);
+    addDeltaFile(t, p, 21L, 22L, 2);
+    addDeltaFile(t, p, 23L, 24L, 2);
+    addBaseFile(t, p, 25L, 25);
+
+    burnThroughTransactions(t.getDbName(), t.getTableName(), 25);
+
+    CompactionRequest rqst = new CompactionRequest(t.getDbName(), 
t.getTableName(), CompactionType.MAJOR);
+    rqst.setPartitionname("ds=today");
+    compactInTxn(rqst);
+
+    Handler handler = new CompactionHandler(conf, txnHandler, false);
+
+    // Fetch the compaction request using the handler
+    List<CleaningRequest> cleaningRequests = handler.findReadyToClean();
+    Assert.assertEquals(1, cleaningRequests.size());
+    CleaningRequest cr = cleaningRequests.get(0);
+    Assert.assertEquals(t.getDbName(), cr.getDbName());
+    Assert.assertEquals(t.getTableName(), cr.getTableName());
+    Assert.assertEquals("ds=today", cr.getPartitionName());
+    Assert.assertEquals(CleaningRequest.RequestType.COMPACTION, cr.getType());
+
+    // Check whether appropriate handler utility methods are called exactly 
once in a successful compaction scenario.
+    Handler mockedHandler = Mockito.spy(handler);
+    AtomicBoolean stop = new AtomicBoolean(true);
+    Cleaner cleaner = new Cleaner(Arrays.asList(mockedHandler));
+    cleaner.setConf(conf);
+    cleaner.init(stop);
+    cleaner.run();
+
+    Mockito.verify(mockedHandler, Mockito.times(1)).findReadyToClean();
+    Mockito.verify(mockedHandler, 
Mockito.times(1)).beforeExecutingCleaningRequest(any(CleaningRequest.class));
+    Mockito.verify(mockedHandler, 
Mockito.times(1)).afterExecutingCleaningRequest(any(CleaningRequest.class), 
any(List.class));

Review Comment:
   Added assert. Done.





Issue Time Tracking
-------------------

    Worklog Id:     (was: 845774)
    Time Spent: 4h 20m  (was: 4h 10m)

> Split Cleaner into separate manageable modular entities
> -------------------------------------------------------
>
>                 Key: HIVE-27019
>                 URL: https://issues.apache.org/jira/browse/HIVE-27019
>             Project: Hive
>          Issue Type: Sub-task
>            Reporter: Sourabh Badhya
>            Assignee: Sourabh Badhya
>            Priority: Major
>              Labels: pull-request-available
>          Time Spent: 4h 20m
>  Remaining Estimate: 0h
>
> As described by the parent task - 
> Cleaner can be divided into separate entities like -
> *1) Handler* - This entity fetches data from the relevant tables in the 
> metastore DB and converts it into a request entity called CleaningRequest. 
> It also performs SQL operations after cleanup (post-processing). Every type of 
> cleaning request is provided by a separate handler.
> *2) Filesystem remover* - This entity fetches the cleaning requests from 
> the various handlers and deletes the corresponding files as specified by 
> each cleaning request.



--
This message was sent by Atlassian Jira
(v8.20.10#820010)

Reply via email to