wchevreuil commented on a change in pull request #749: HBASE-23205 Correctly update the position of WALs currently being replicated
URL: https://github.com/apache/hbase/pull/749#discussion_r347130863
 
 

 ##########
 File path: hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceWALReaderThread.java
 ##########
 @@ -135,59 +127,46 @@ public void run() {
       try (WALEntryStream entryStream =
           new WALEntryStream(logQueue, fs, conf, currentPosition, metrics)) {
         while (isReaderRunning()) { // loop here to keep reusing stream while we can
-          if (!checkQuota()) {
+          if (manager.isBufferQuotaReached()) {
+            Threads.sleep(sleepForRetries);
             continue;
           }
-          WALEntryBatch batch = null;
-          while (entryStream.hasNext()) {
-            if (batch == null) {
-              batch = new WALEntryBatch(replicationBatchCountCapacity, entryStream.getCurrentPath());
-            }
+          WALEntryBatch batch =
+                  new WALEntryBatch(replicationBatchCountCapacity, replicationBatchSizeCapacity);
+          boolean hasNext;
+          while ((hasNext = entryStream.hasNext()) == true) {
             Entry entry = entryStream.next();
             entry = filterEntry(entry);
             if (entry != null) {
               WALEdit edit = entry.getEdit();
               if (edit != null && !edit.isEmpty()) {
-                long entrySize = getEntrySizeIncludeBulkLoad(entry);
-                long entrySizeExlucdeBulkLoad = getEntrySizeExcludeBulkLoad(entry);
-                batch.addEntry(entry);
-                replicationSourceManager.setPendingShipment(true);
-                updateBatchStats(batch, entry, entryStream.getPosition(), entrySize);
-                boolean totalBufferTooLarge = acquireBufferQuota(entrySizeExlucdeBulkLoad);
+                long entrySizeExcludeBulkLoad = batch.addEntry(entry);
+                boolean totalBufferTooLarge = manager.acquireBufferQuota(entrySizeExcludeBulkLoad);
                 // Stop if too many entries or too big
-                if (totalBufferTooLarge || batch.getHeapSize() >= replicationBatchSizeCapacity
-                    || batch.getNbEntries() >= replicationBatchCountCapacity) {
+                if (totalBufferTooLarge || batch.isLimitReached()) {
                   break;
                 }
               }
-            } else {
-              replicationSourceManager.logPositionAndCleanOldLogs(entryStream.getCurrentPath(),
-                this.replicationQueueInfo.getPeerClusterZnode(),
-                entryStream.getPosition(),
-                this.replicationQueueInfo.isQueueRecovered(), false);
             }
           }
-          if (batch != null && (!batch.getLastSeqIds().isEmpty() || batch.getNbEntries() > 0)) {
-            if (LOG.isTraceEnabled()) {
-              LOG.trace(String.format("Read %s WAL entries eligible for replication",
-                batch.getNbEntries()));
-            }
-            entryBatchQueue.put(batch);
+
+          if (LOG.isTraceEnabled()) {
 
 Review comment:
   This can lead to confusion and adds extra complexity for troubleshooting. It is even misleading: the message says the entries are eligible for replication even when they were filtered out, which means just the opposite.
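   
   For illustration only, a minimal sketch of the kind of guard being asked for, reusing names from the removed block above (batch, entryBatchQueue, LOG) and assuming the pre-patch WALEntryBatch accessors (getNbEntries, getLastSeqIds) still apply:
   
       // Sketch, not the patch itself: only log and enqueue when the batch
       // actually holds entries that survived filtering, so the "eligible
       // for replication" wording stays accurate.
       if (batch.getNbEntries() > 0 || !batch.getLastSeqIds().isEmpty()) {
         if (LOG.isTraceEnabled()) {
           LOG.trace(String.format("Read %s WAL entries eligible for replication",
             batch.getNbEntries()));
         }
         entryBatchQueue.put(batch);
       }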

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services
