OneSizeFitsQuorum commented on code in PR #11228:
URL: https://github.com/apache/iotdb/pull/11228#discussion_r1338510233
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/metrics/FileMetrics.java:
##########
@@ -66,18 +67,49 @@ public class FileMetrics implements IMetricSet {
private static final String SEQUENCE = "sequence";
private static final String UNSEQUENCE = "unsequence";
private static final String LEVEL = "level";
- private final AtomicLong seqFileSize = new AtomicLong(0);
- private final AtomicLong unseqFileSize = new AtomicLong(0);
- private final AtomicInteger seqFileNum = new AtomicInteger(0);
- private final AtomicInteger unseqFileNum = new AtomicInteger(0);
+ // database -> regionId -> sequence file size
+ private final Map<String, Map<String, Long>> seqFileSizeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> unsequence file size
+ private final Map<String, Map<String, Long>> unseqFileSizeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> sequence file number
+ private final Map<String, Map<String, Integer>> seqFileNumMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> unsequence file number
+ private final Map<String, Map<String, Integer>> unseqFileNumMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> sequence file size gauge
Review Comment:
It seems that we need to maintain this map when we delete regions, and we
also need to remove the database entry once its corresponding region map becomes empty
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/metrics/FileMetrics.java:
##########
@@ -333,149 +331,236 @@ private long getOpenFileHandlersNumber() {
}
// following are update functions for tsfile metrics
- public void addFile(long size, boolean seq, String name) {
- updateGlobalCountAndSize(size, 1, seq);
+ public void addFile(String database, String regionId, long size, boolean
seq, String name) {
+ updateGlobalCountAndSize(database, regionId, size, 1, seq);
try {
TsFileNameGenerator.TsFileName tsFileName =
TsFileNameGenerator.getTsFileName(name);
int level = tsFileName.getInnerCompactionCnt();
- updateLevelCountAndSize(size, 1, seq, level);
+ updateLevelCountAndSize(size, 1, seq, level, database, regionId);
Review Comment:
Put the `database` and `regionId` arguments first, just like in all the other
functions such as `updateGlobalCountAndSize`
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/metrics/FileMetrics.java:
##########
@@ -333,149 +331,236 @@ private long getOpenFileHandlersNumber() {
}
// following are update functions for tsfile metrics
- public void addFile(long size, boolean seq, String name) {
- updateGlobalCountAndSize(size, 1, seq);
+ public void addFile(String database, String regionId, long size, boolean
seq, String name) {
+ updateGlobalCountAndSize(database, regionId, size, 1, seq);
try {
TsFileNameGenerator.TsFileName tsFileName =
TsFileNameGenerator.getTsFileName(name);
int level = tsFileName.getInnerCompactionCnt();
- updateLevelCountAndSize(size, 1, seq, level);
+ updateLevelCountAndSize(size, 1, seq, level, database, regionId);
} catch (IOException e) {
log.warn("Unexpected error occurred when getting tsfile name", e);
}
}
- private void updateGlobalCountAndSize(long sizeDelta, int countDelta,
boolean seq) {
- if (seq) {
- seqFileSize.getAndAdd(sizeDelta);
- seqFileNum.getAndAdd(countDelta);
- } else {
- unseqFileSize.getAndAdd(sizeDelta);
- unseqFileNum.getAndAdd(countDelta);
- }
- }
-
- private void updateLevelCountAndSize(long sizeDelta, int countDelta, boolean
seq, int level) {
- int count = 0;
- long totalSize = 0;
- if (seq) {
- count =
- seqLevelTsFileCountMap.compute(level, (k, v) -> v == null ?
countDelta : v + countDelta);
- totalSize =
- seqLevelTsFileSizeMap.compute(level, (k, v) -> v == null ? sizeDelta
: v + sizeDelta);
- } else {
- count =
- unseqLevelTsFileCountMap.compute(
- level, (k, v) -> v == null ? countDelta : v + countDelta);
- totalSize =
- unseqLevelTsFileSizeMap.compute(level, (k, v) -> v == null ?
sizeDelta : v + sizeDelta);
+ private void updateGlobalCountAndSize(
+ String database, String regionId, long sizeDelta, int countDelta,
boolean seq) {
+ long fileSize =
+ (seq ? seqFileSizeMap : unseqFileSizeMap)
+ .compute(
+ database,
+ (k, v) -> {
+ long size = 0;
+ if (v == null) {
+ v = new HashMap<>();
+ } else if (v.containsKey(regionId)) {
+ size = v.get(regionId);
+ }
+ v.put(regionId, size + sizeDelta);
+ return v;
+ })
+ .get(regionId);
+ long fileNum =
Review Comment:
How about this? BTW, we don't need to pull out a separate function; this
approach will save us a lot of hashing and allow us to update different
regions in parallel within the same database
<img width="498" alt="image"
src="https://github.com/apache/iotdb/assets/32640567/3b16d3c0-3e0a-4512-9126-2dac04ac48f9">
##########
iotdb-core/datanode/src/main/java/org/apache/iotdb/db/service/metrics/FileMetrics.java:
##########
@@ -66,18 +67,49 @@ public class FileMetrics implements IMetricSet {
private static final String SEQUENCE = "sequence";
private static final String UNSEQUENCE = "unsequence";
private static final String LEVEL = "level";
- private final AtomicLong seqFileSize = new AtomicLong(0);
- private final AtomicLong unseqFileSize = new AtomicLong(0);
- private final AtomicInteger seqFileNum = new AtomicInteger(0);
- private final AtomicInteger unseqFileNum = new AtomicInteger(0);
+ // database -> regionId -> sequence file size
+ private final Map<String, Map<String, Long>> seqFileSizeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> unsequence file size
+ private final Map<String, Map<String, Long>> unseqFileSizeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> sequence file number
+ private final Map<String, Map<String, Integer>> seqFileNumMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> unsequence file number
+ private final Map<String, Map<String, Integer>> unseqFileNumMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> sequence file size gauge
+ private final Map<String, Map<String, Gauge>> seqFileSizeGaugeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> unsequence file size gauge
+ private final Map<String, Map<String, Gauge>> unseqFileSizeGaugeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> sequence file number gauge
+ private final Map<String, Map<String, Gauge>> seqFileNumGaugeMap = new
ConcurrentHashMap<>();
+ // database -> regionId -> unsequence file number gauge
+ private final Map<String, Map<String, Gauge>> unseqFileNumGaugeMap = new
ConcurrentHashMap<>();
private final AtomicInteger modFileNum = new AtomicInteger(0);
-
private final AtomicLong modFileSize = new AtomicLong(0);
- private final Map<Integer, Integer> seqLevelTsFileCountMap = new
ConcurrentHashMap<>();
- private final Map<Integer, Integer> unseqLevelTsFileCountMap = new
ConcurrentHashMap<>();
- private final Map<Integer, Long> seqLevelTsFileSizeMap = new
ConcurrentHashMap<>();
- private final Map<Integer, Long> unseqLevelTsFileSizeMap = new
ConcurrentHashMap<>();
+ // level -> database -> regionId -> sequence file number
+ private final Map<Integer, Map<String, Map<String, Integer>>>
seqLevelTsFileCountMap =
Review Comment:
It feels like we don't need to differentiate per-level file sizes for
different databases and regions, because the part above is already enough
for us to do storage resource load balancing
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]