Apache9 commented on code in PR #5545:
URL: https://github.com/apache/hbase/pull/5545#discussion_r1545554451
##########
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java:
##########
@@ -105,72 +132,44 @@ public class StoreFileWriter implements CellSink, ShipperListener {
    * @param fileContext The HFile context
    * @param shouldDropCacheBehind Drop pages written to page cache after writing the store file.
    * @param compactedFilesSupplier Returns the {@link HStore} compacted files which not archived
+   * @param comparator Cell comparator
+   * @param maxVersions max cell versions
+   * @param newVersionBehavior enable new version behavior
    * @throws IOException problem writing to FS
    */
-  private StoreFileWriter(FileSystem fs, Path path, final Configuration conf, CacheConfig cacheConf,
-    BloomType bloomType, long maxKeys, InetSocketAddress[] favoredNodes, HFileContext fileContext,
-    boolean shouldDropCacheBehind, Supplier<Collection<HStoreFile>> compactedFilesSupplier)
-    throws IOException {
+  private StoreFileWriter(FileSystem fs, Path liveFilePath, Path historicalFilePath,
+    final Configuration conf, CacheConfig cacheConf, BloomType bloomType, long maxKeys,
+    InetSocketAddress[] favoredNodes, HFileContext fileContext, boolean shouldDropCacheBehind,
+    Supplier<Collection<HStoreFile>> compactedFilesSupplier, CellComparator comparator,
+    int maxVersions, boolean newVersionBehavior) throws IOException {
+    this.fs = fs;
+    this.historicalFilePath = historicalFilePath;
+    this.conf = conf;
+    this.cacheConf = cacheConf;
+    this.bloomType = bloomType;
+    this.maxKeys = maxKeys;
+    this.favoredNodes = favoredNodes;
+    this.fileContext = fileContext;
+    this.shouldDropCacheBehind = shouldDropCacheBehind;
     this.compactedFilesSupplier = compactedFilesSupplier;
-    this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
-    // TODO : Change all writers to be specifically created for compaction context
-    writer =
-      HFile.getWriterFactory(conf, cacheConf).withPath(fs, path).withFavoredNodes(favoredNodes)
-        .withFileContext(fileContext).withShouldDropCacheBehind(shouldDropCacheBehind).create();
-
-    generalBloomFilterWriter = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf,
-      bloomType, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
-
-    if (generalBloomFilterWriter != null) {
-      this.bloomType = bloomType;
-      this.bloomParam = BloomFilterUtil.getBloomFilterParam(bloomType, conf);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Bloom filter type for " + path + ": " + this.bloomType + ", param: "
-          + (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH
-            ? Bytes.toInt(bloomParam)
-            : Bytes.toStringBinary(bloomParam))
-          + ", " + generalBloomFilterWriter.getClass().getSimpleName());
-      }
-      // init bloom context
-      switch (bloomType) {
-        case ROW:
-          bloomContext =
-            new RowBloomContext(generalBloomFilterWriter, fileContext.getCellComparator());
-          break;
-        case ROWCOL:
-          bloomContext =
-            new RowColBloomContext(generalBloomFilterWriter, fileContext.getCellComparator());
-          break;
-        case ROWPREFIX_FIXED_LENGTH:
-          bloomContext = new RowPrefixFixedLengthBloomContext(generalBloomFilterWriter,
-            fileContext.getCellComparator(), Bytes.toInt(bloomParam));
-          break;
-        default:
-          throw new IOException(
-            "Invalid Bloom filter type: " + bloomType + " (ROW or ROWCOL or ROWPREFIX expected)");
-      }
-    } else {
-      // Not using Bloom filters.
-      this.bloomType = BloomType.NONE;
-    }
+    this.comparator = comparator;
+    this.maxVersions = maxVersions;
+    this.newVersionBehavior = newVersionBehavior;
+    liveFileWriter = new SingleStoreFileWriter(fs, liveFilePath, conf, cacheConf, bloomType,
+      maxKeys, favoredNodes, fileContext, shouldDropCacheBehind, compactedFilesSupplier);
+  }
 
-    // initialize delete family Bloom filter when there is NO RowCol Bloom filter
-    if (this.bloomType != BloomType.ROWCOL) {
-      this.deleteFamilyBloomFilterWriter = BloomFilterFactory.createDeleteBloomAtWrite(conf,
-        cacheConf, (int) Math.min(maxKeys, Integer.MAX_VALUE), writer);
-      deleteFamilyBloomContext =
-        new RowBloomContext(deleteFamilyBloomFilterWriter, fileContext.getCellComparator());
-    } else {
-      deleteFamilyBloomFilterWriter = null;
-    }
-    if (deleteFamilyBloomFilterWriter != null && LOG.isTraceEnabled()) {
-      LOG.trace("Delete Family Bloom filter type for " + path + ": "
-        + deleteFamilyBloomFilterWriter.getClass().getSimpleName());
-    }
+  public static boolean shouldEnableHistoricalCompactionFiles(Configuration conf) {
+    return conf.getBoolean(ENABLE_HISTORICAL_COMPACTION_FILES,
+      DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES)
+      && conf.get(STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName())
+        .equals(DefaultStoreEngine.class.getName())
+      && conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DefaultCompactor.class.getName())
+        .equals(DefaultCompactor.class.getName());
   }
 
   public long getPos() throws IOException {
Review Comment:
Why do we silently ignore this? Could you please add more comments in the code base to describe this? And at least we should have some warning logs to tell users that this configuration cannot be used together with their configured store engine or compaction algorithm...
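
As a rough illustration of the warning the reviewer is asking for, the check could log before returning false rather than failing silently. This is a minimal sketch only, assuming the class's slf4j LOG field and the configuration constants shown in the diff above are in scope; the wording and control flow are hypothetical, not the PR author's actual follow-up:

  public static boolean shouldEnableHistoricalCompactionFiles(Configuration conf) {
    if (
      !conf.getBoolean(ENABLE_HISTORICAL_COMPACTION_FILES,
        DEFAULT_ENABLE_HISTORICAL_COMPACTION_FILES)
    ) {
      // Feature is not requested; nothing to warn about.
      return false;
    }
    if (
      !conf.get(STORE_ENGINE_CLASS_KEY, DefaultStoreEngine.class.getName())
        .equals(DefaultStoreEngine.class.getName())
    ) {
      // Hypothetical wording; surfaces the silent-ignore case the review flags.
      LOG.warn("{} is ignored because historical compaction files are only supported "
        + "with the default store engine {}", ENABLE_HISTORICAL_COMPACTION_FILES,
        DefaultStoreEngine.class.getName());
      return false;
    }
    if (
      !conf.get(DEFAULT_COMPACTOR_CLASS_KEY, DefaultCompactor.class.getName())
        .equals(DefaultCompactor.class.getName())
    ) {
      LOG.warn("{} is ignored because historical compaction files are only supported "
        + "with the default compactor {}", ENABLE_HISTORICAL_COMPACTION_FILES,
        DefaultCompactor.class.getName());
      return false;
    }
    return true;
  }

One caveat with this sketch: if the method is called on every flush or compaction, a WARN per call could get noisy; logging once at store-open time (or demoting repeats to DEBUG) might be the better trade-off.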
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]