kadirozde commented on code in PR #5545:
URL: https://github.com/apache/hbase/pull/5545#discussion_r1545467119


##########
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java:
##########
@@ -105,72 +132,44 @@ public class StoreFileWriter implements CellSink, ShipperListener {
    * @param fileContext            The HFile context
    * @param shouldDropCacheBehind  Drop pages written to page cache after writing the store file.
    * @param compactedFilesSupplier Returns the {@link HStore} compacted files which not archived
+   * @param comparator             Cell comparator
+   * @param maxVersions            max cell versions
+   * @param newVersionBehavior     enable new version behavior
    * @throws IOException problem writing to FS
    */
-  private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf,
-    BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext,
-    boolean shouldDropCacheBehind, Supplier<Collection<HStoreFile>> compactedFilesSupplier)
-    throws IOException {
+  private StoreFileWriter(FileSystem fs, Path liveFilePath, Path historicalFilePath,
+    final Configuration conf, CacheConfig cacheConf, BloomType bloomType, long maxKeys,
+    InetSocketAddress[] favoredNodes, HFileContext fileContext, boolean shouldDropCacheBehind,
+    Supplier<Collection<HStoreFile>> compactedFilesSupplier, CellComparator comparator,
+    int maxVersions, boolean newVersionBehavior) throws IOException {
+    this.fs = fs;
+    this.historicalFilePath = historicalFilePath;
+    this.conf = conf;
+    this.cacheConf = cacheConf;
+    this.bloomType = bloomType;
+    this.maxKeys = maxKeys;
+    this.favoredNodes = favoredNodes;
+    this.fileContext = fileContext;
+    this.shouldDropCacheBehind = shouldDropCacheBehind;
     this.compactedFilesSupplier = compactedFilesSupplier;
-    this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
-    // TODO : Change all writers to be specifically created for compaction context
-    writer =
-      HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes)
-        .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create();
-
-    generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf,
-      bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
-
-    if (generalBloomFilterWriter != null) {
-      this.bloomType = bloomType;
-      this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: "
-          + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH
-            ? Bytes.toInt(bloomParam)
-            : Bytes.toStringBinary(bloomParam))
-          + ", " + generalBloomFilterWriter.getClass().getSimpleName());
-      }
-      // init bloom context
-      switch (bloomType) {
-        case ROW:
-          bloomContext =
-            new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator());
-          break;
-        case ROWCOL:
-          bloomContext =
-            new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator());
-          break;
-        case ROWPREFIX_FIXED_LENGTH:
-          bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter,
-            fileContext.getCellComparator(), Bytes.toInt(bloomParam));
-          break;
-        default:
-          throw new IOException(
-            "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)");
-      }
-    } else {
-      // Not using Bloom filters.
-      this.bloomType = BloomType.NONE;
-    }
+    this.comparator = comparator;
+    this.maxVersions = maxVersions;
+    this.newVersionBehavior = newVersionBehavior;
+    liveFileWriter = new SingleStoreFileWriter(fs, liveFilePath, conf, cacheConf, bloomType,
+      maxKeys, favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier);
+  }
 
-    // initialize delete family Bloom filter when there is NO RowCol Bloom filter
-    if (this.bloomType != BloomType.ROWCOL) {
-      this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf,
-        cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
-      deleteFamilyBloomContext =
-        new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator());
-    } else {
-      deleteFamilyBloomFilterWriter = null;
-    }
-    if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) {
-      LOG.trace("Delete Family Bloom filter type for " + path + ": "
-        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
-    }
+  public static boolean shouldEnableHistoricalCompactionFiles(Configuration conf) {
+    return conf.getBoolean(ENABLE_HISTORICAL_COMPACTION_FILES,
+      DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES)
+      && conf.get(STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName())
+        .equals(DefaultStoreEngine.class.getName())
+      && conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DefaultCompactor.class.getName())
+        .equals(DefaultCompactor.class.getName());
   }
 
   public long getPos() throws IOException {

Review Comment:
   This is where we enable dual file writing (in other words, generating historical compaction files) only for the default store engine with the default compactor. As we decided, the other compaction types, namely mob, stripe, and date tiered, will be supported later; I will file separate jiras for them. For those compactions we silently skip historical file generation.
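   
   To make the gating semantics concrete, here is a minimal standalone sketch. The keys, defaults, and the `HistoricalFilesGateSketch` class are stand-ins for illustration only, mirroring the constants referenced in the diff (the authoritative values live in StoreFileWriter, StoreEngine, and DefaultStoreEngine); this is not code from the PR:
   
   ```java
   import org.apache.hadoop.conf.Configuration;
   
   /**
    * Hypothetical sketch mirroring shouldEnableHistoricalCompactionFiles: the
    * feature flag alone is not enough, both the store engine and the compactor
    * must be the defaults for dual file writing to kick in.
    */
   public class HistoricalFilesGateSketch {
     // Stand-in keys and defaults; the real constants are defined in HBase.
     static final String ENABLE_HISTORICAL_COMPACTION_FILES =
       "hbase.enable.historical.compaction.files"; // assumed key name
     static final boolean DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES = false;
     static final String STORE_ENGINE_CLASS_KEY = "hbase.hstore.engine.class";
     static final String DEFAULT_COMPACTOR_CLASS_KEY =
       "hbase.hstore.defaultengine.compactor.class";
     static final String DEFAULT_STORE_ENGINE =
       "org.apache.hadoop.hbase.regionserver.DefaultStoreEngine";
     static final String DEFAULT_COMPACTOR =
       "org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor";
   
     static boolean shouldEnableHistoricalCompactionFiles(Configuration conf) {
       return conf.getBoolean(ENABLE_HISTORICAL_COMPACTION_FILES,
         DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES)
         && conf.get(STORE_ENGINE_CLASS_KEY, DEFAULT_STORE_ENGINE).equals(DEFAULT_STORE_ENGINE)
         && conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DEFAULT_COMPACTOR).equals(DEFAULT_COMPACTOR);
     }
   
     public static void main(String[] args) {
       Configuration conf = new Configuration(false);
       conf.setBoolean(ENABLE_HISTORICAL_COMPACTION_FILES, true);
       // Defaults everywhere else, so dual file writing is enabled.
       System.out.println(shouldEnableHistoricalCompactionFiles(conf)); // true
   
       // A non-default store engine (e.g. stripe) silently disables it,
       // even though the feature flag is still set.
       conf.set(STORE_ENGINE_CLASS_KEY,
         "org.apache.hadoop.hbase.regionserver.StripeStoreEngine");
       System.out.println(shouldEnableHistoricalCompactionFiles(conf)); // false
     }
   }
   ```
   
   This matches the intent described above: any non-default engine or compactor combination falls back to single (live) file writing without raising an error.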
   


