This is an automated email from the ASF dual-hosted git repository.

haonan pushed a commit to branch win_metrics_new
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit 624b7cfba43c507220f345b6f415d50ac0ffafff
Author: unknown <[email protected]>
AuthorDate: Thu Mar 12 16:38:06 2026 +0800

    fix
---
 .../metricsets/disk/IDiskMetricsManager.java       |   2 +-
 .../disk/NewWindowsDiskMetricsManager.java         |  99 ++--
 .../metricsets/disk/WindowsDiskMetricsManager.java | 511 +++++++++++++++++----
 3 files changed, 459 insertions(+), 153 deletions(-)

diff --git 
a/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/IDiskMetricsManager.java
 
b/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/IDiskMetricsManager.java
index d56ac03cc6a..83a9f311223 100644
--- 
a/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/IDiskMetricsManager.java
+++ 
b/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/IDiskMetricsManager.java
@@ -125,7 +125,7 @@ public interface IDiskMetricsManager {
       String os = System.getProperty("os.name").toLowerCase();
 
       if (os.startsWith("windows")) {
-        return new NewWindowsDiskMetricsManager();
+        return new WindowsDiskMetricsManager();
       } else if (os.startsWith("linux")) {
         return new LinuxDiskMetricsManager();
       } else {
diff --git 
a/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/NewWindowsDiskMetricsManager.java
 
b/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/NewWindowsDiskMetricsManager.java
index 9630ce180b8..c18112cd66f 100644
--- 
a/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/NewWindowsDiskMetricsManager.java
+++ 
b/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/NewWindowsDiskMetricsManager.java
@@ -36,7 +36,8 @@ import java.util.Map;
 public class NewWindowsDiskMetricsManager extends AbstractDiskMetricsManager {
   private static final Logger LOGGER = 
LoggerFactory.getLogger(NewWindowsDiskMetricsManager.class);
 
-  private static final MetricConfig metricConfig = 
MetricConfigDescriptor.getInstance().getMetricConfig();
+  private static final MetricConfig metricConfig =
+      MetricConfigDescriptor.getInstance().getMetricConfig();
 
   private static final int DEFAULT_SECTOR_SIZE = 512;
   private final Map<String, Integer> diskSectorSizeMap = new HashMap<>();
@@ -281,57 +282,51 @@ public class NewWindowsDiskMetricsManager extends 
AbstractDiskMetricsManager {
     Map<String, Long> tempWriteTimeCost = queryCounterLong("Avg. Disk 
sec/Write");
 Map<String, Long> tempIoBusyTime = queryCounterLong("% Disk Time"); // busy-time percentage
 Map<String, Long> tempTimeInQueue = queryCounterLong("Avg. Disk Queue Length"); // queue length; multiply by elapsed time to estimate time in queue
-      for (String diskId : diskIdSet) {
-        updateSingleDiskInfo(
-            diskId,
-            adjustToSector(tempReadSectorCount).getOrDefault(diskId, 0L),
-            lastReadSectorCountForDisk,
-            incrementReadSectorCountForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            adjustToSector(tempWriteSectorCount).getOrDefault(diskId, 0L),
-            lastWriteSectorCountForDisk,
-            incrementWriteSectorCountForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            tempReadOperationCount.getOrDefault(diskId, 0L),
-            lastReadOperationCountForDisk,
-            incrementReadOperationCountForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            tempWriteOperationCount.getOrDefault(diskId, 0L),
-            lastWriteOperationCountForDisk,
-            incrementWriteOperationCountForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            0L,
-            lastMergedReadCountForDisk,
-            incrementMergedReadOperationCountForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            0L,
-            lastMergedWriteCountForDisk,
-            incrementMergedWriteOperationCountForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            tempReadTimeCost.getOrDefault(diskId, 0L),
-            lastReadTimeCostForDisk,
-            incrementReadTimeCostForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            tempWriteTimeCost.getOrDefault(diskId, 0L),
-            lastWriteTimeCostForDisk,
-            incrementWriteTimeCostForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            tempIoBusyTime.getOrDefault(diskId, 0L),
-            lastIoBusyTimeForDisk,
-            incrementIoBusyTimeForDisk);
-        updateSingleDiskInfo(
-            diskId,
-            tempTimeInQueue.getOrDefault(diskId, 0L),
-            lastTimeInQueueForDisk,
-            incrementTimeInQueueForDisk);
+    for (String diskId : diskIdSet) {
+      updateSingleDiskInfo(
+          diskId,
+          adjustToSector(tempReadSectorCount).getOrDefault(diskId, 0L),
+          lastReadSectorCountForDisk,
+          incrementReadSectorCountForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          adjustToSector(tempWriteSectorCount).getOrDefault(diskId, 0L),
+          lastWriteSectorCountForDisk,
+          incrementWriteSectorCountForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          tempReadOperationCount.getOrDefault(diskId, 0L),
+          lastReadOperationCountForDisk,
+          incrementReadOperationCountForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          tempWriteOperationCount.getOrDefault(diskId, 0L),
+          lastWriteOperationCountForDisk,
+          incrementWriteOperationCountForDisk);
+      updateSingleDiskInfo(
+          diskId, 0L, lastMergedReadCountForDisk, 
incrementMergedReadOperationCountForDisk);
+      updateSingleDiskInfo(
+          diskId, 0L, lastMergedWriteCountForDisk, 
incrementMergedWriteOperationCountForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          tempReadTimeCost.getOrDefault(diskId, 0L),
+          lastReadTimeCostForDisk,
+          incrementReadTimeCostForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          tempWriteTimeCost.getOrDefault(diskId, 0L),
+          lastWriteTimeCostForDisk,
+          incrementWriteTimeCostForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          tempIoBusyTime.getOrDefault(diskId, 0L),
+          lastIoBusyTimeForDisk,
+          incrementIoBusyTimeForDisk);
+      updateSingleDiskInfo(
+          diskId,
+          tempTimeInQueue.getOrDefault(diskId, 0L),
+          lastTimeInQueueForDisk,
+          incrementTimeInQueueForDisk);
     }
   }
 
diff --git 
a/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/WindowsDiskMetricsManager.java
 
b/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/WindowsDiskMetricsManager.java
index b8961bd9b9c..88b8a398be3 100644
--- 
a/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/WindowsDiskMetricsManager.java
+++ 
b/iotdb-core/metrics/interface/src/main/java/org/apache/iotdb/metrics/metricsets/disk/WindowsDiskMetricsManager.java
@@ -19,172 +19,483 @@
 
 package org.apache.iotdb.metrics.metricsets.disk;
 
-import oshi.SystemInfo;
-import oshi.hardware.HWDiskStore;
-import oshi.software.os.OSProcess;
+import org.apache.iotdb.metrics.config.MetricConfigDescriptor;
 
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
+import java.util.Set;
+
+/**
+ * Disk metrics manager for Windows system.
+ *
+ * <p>Windows does not expose Linux-like cumulative counters through procfs, 
so this implementation
+ * periodically samples Win32 performance counters and accumulates the 
observed per-second values
+ * into totals that match the Linux manager contract as closely as possible.
+ */
+public class WindowsDiskMetricsManager implements IDiskMetricsManager {
+  private static final Logger LOGGER = 
LoggerFactory.getLogger(WindowsDiskMetricsManager.class);
 
-/** Disk Metrics Manager for Windows system, not implemented yet. */
-@SuppressWarnings({"rawtypes", "unchecked"})
-public class WindowsDiskMetricsManager extends AbstractDiskMetricsManager {
+  private static final double BYTES_PER_KB = 1024.0;
+  private static final long UPDATE_SMALLEST_INTERVAL = 10000L;
+  private static final String POWER_SHELL = "powershell";
+  private static final String POWER_SHELL_NO_PROFILE = "-NoProfile";
+  private static final String POWER_SHELL_COMMAND = "-Command";
+  private static final String TOTAL_DISK_INSTANCE = "_Total";
+  private static final Charset WINDOWS_SHELL_CHARSET = 
getWindowsShellCharset();
+  private static final String DISK_QUERY =
+      "Get-CimInstance Win32_PerfFormattedData_PerfDisk_PhysicalDisk | "
+          + "Where-Object { $_.Name -ne '_Total' } | "
+          + "ForEach-Object { "
+          + "[string]::Concat("
+          + "$_.Name, [char]9, "
+          + "$_.DiskReadsPerSec, [char]9, "
+          + "$_.DiskWritesPerSec, [char]9, "
+          + "$_.DiskReadBytesPerSec, [char]9, "
+          + "$_.DiskWriteBytesPerSec, [char]9, "
+          + "$_.AvgDisksecPerRead, [char]9, "
+          + "$_.AvgDisksecPerWrite, [char]9, "
+          + "$_.PercentIdleTime, [char]9, "
+          + "$_.AvgDiskQueueLength) }";
+  private static final String PROCESS_QUERY_TEMPLATE =
+      "Get-CimInstance Win32_PerfFormattedData_PerfProc_Process | "
+          + "Where-Object { $_.IDProcess -eq %s } | "
+          + "ForEach-Object { "
+          + "[string]::Concat("
+          + "$_.IOReadOperationsPerSec, [char]9, "
+          + "$_.IOWriteOperationsPerSec, [char]9, "
+          + "$_.IOReadBytesPerSec, [char]9, "
+          + "$_.IOWriteBytesPerSec) }";
 
-  private final SystemInfo systemInfo = new SystemInfo();
-  private final OSProcess thisProcess;
-  private List<HWDiskStore> diskStores;
+  private final String processId;
+  private final Set<String> diskIdSet = new HashSet<>();
+
+  private long lastUpdateTime = 0L;
+  private long updateInterval = 1L;
+
+  private final Map<String, Long> lastReadOperationCountForDisk = new 
HashMap<>();
+  private final Map<String, Long> lastWriteOperationCountForDisk = new 
HashMap<>();
+  private final Map<String, Long> lastReadTimeCostForDisk = new HashMap<>();
+  private final Map<String, Long> lastWriteTimeCostForDisk = new HashMap<>();
+  private final Map<String, Long> lastMergedReadCountForDisk = new HashMap<>();
+  private final Map<String, Long> lastMergedWriteCountForDisk = new 
HashMap<>();
+  private final Map<String, Long> lastReadSizeForDisk = new HashMap<>();
+  private final Map<String, Long> lastWriteSizeForDisk = new HashMap<>();
+  private final Map<String, Double> lastIoUtilsPercentageForDisk = new 
HashMap<>();
+  private final Map<String, Double> lastQueueSizeForDisk = new HashMap<>();
+  private final Map<String, Double> lastAvgReadCostTimeOfEachOpsForDisk = new 
HashMap<>();
+  private final Map<String, Double> lastAvgWriteCostTimeOfEachOpsForDisk = new 
HashMap<>();
+  private final Map<String, Double> lastAvgSizeOfEachReadForDisk = new 
HashMap<>();
+  private final Map<String, Double> lastAvgSizeOfEachWriteForDisk = new 
HashMap<>();
+
+  private long lastReallyReadSizeForProcess = 0L;
+  private long lastReallyWriteSizeForProcess = 0L;
+  private long lastAttemptReadSizeForProcess = 0L;
+  private long lastAttemptWriteSizeForProcess = 0L;
+  private long lastReadOpsCountForProcess = 0L;
+  private long lastWriteOpsCountForProcess = 0L;
 
   public WindowsDiskMetricsManager() {
-    thisProcess = systemInfo.getOperatingSystem().getCurrentProcess();
-    diskStores = systemInfo.getHardware().getDiskStores();
-    init();
+    processId = 
String.valueOf(MetricConfigDescriptor.getInstance().getMetricConfig().getPid());
+    collectDiskId();
   }
 
   @Override
   public Map<String, Double> getReadDataSizeForDisk() {
     checkUpdate();
-    Map<String, Double> result = new HashMap<>();
-    diskStores.forEach(
-        disk -> {
-          result.put(this.getDisplayName(disk), (double) disk.getReadBytes() / 
BYTES_PER_KB);
-        });
-    return result;
+    return toKbMap(lastReadSizeForDisk);
   }
 
   @Override
   public Map<String, Double> getWriteDataSizeForDisk() {
     checkUpdate();
-    Map<String, Double> result = new HashMap<>();
-    diskStores.forEach(
-        disk -> {
-          result.put(this.getDisplayName(disk), (double) disk.getWriteBytes() 
/ BYTES_PER_KB);
-        });
-    return result;
+    return toKbMap(lastWriteSizeForDisk);
   }
 
   @Override
   public Map<String, Long> getReadOperationCountForDisk() {
     checkUpdate();
-    Map<String, Long> result = new HashMap<>();
-    diskStores.forEach(
-        disk -> {
-          result.put(this.getDisplayName(disk), disk.getReads());
-        });
-    return result;
+    return lastReadOperationCountForDisk;
   }
 
   @Override
   public Map<String, Long> getWriteOperationCountForDisk() {
     checkUpdate();
-    Map<String, Long> result = new HashMap<>();
-    diskStores.forEach(
-        disk -> {
-          result.put(this.getDisplayName(disk), disk.getWrites());
-        });
-    return result;
+    return lastWriteOperationCountForDisk;
   }
 
-  private Map<String, Long> getTransferTimesForDisk() {
+  @Override
+  public Map<String, Long> getReadCostTimeForDisk() {
     checkUpdate();
-    Map<String, Long> result = new HashMap<>();
-    diskStores.forEach(
-        disk -> {
-          result.put(this.getDisplayName(disk), disk.getTransferTime());
-        });
-    return result;
+    return lastReadTimeCostForDisk;
+  }
+
+  @Override
+  public Map<String, Long> getWriteCostTimeForDisk() {
+    checkUpdate();
+    return lastWriteTimeCostForDisk;
+  }
+
+  @Override
+  public Map<String, Double> getIoUtilsPercentage() {
+    checkUpdate();
+    return lastIoUtilsPercentageForDisk;
+  }
+
+  @Override
+  public Map<String, Double> getAvgReadCostTimeOfEachOpsForDisk() {
+    checkUpdate();
+    return lastAvgReadCostTimeOfEachOpsForDisk;
+  }
+
+  @Override
+  public Map<String, Double> getAvgWriteCostTimeOfEachOpsForDisk() {
+    checkUpdate();
+    return lastAvgWriteCostTimeOfEachOpsForDisk;
+  }
+
+  @Override
+  public Map<String, Double> getAvgSizeOfEachReadForDisk() {
+    checkUpdate();
+    return lastAvgSizeOfEachReadForDisk;
+  }
+
+  @Override
+  public Map<String, Double> getAvgSizeOfEachWriteForDisk() {
+    checkUpdate();
+    return lastAvgSizeOfEachWriteForDisk;
+  }
+
+  @Override
+  public Map<String, Long> getMergedWriteOperationForDisk() {
+    checkUpdate();
+    return lastMergedWriteCountForDisk;
+  }
+
+  @Override
+  public Map<String, Long> getMergedReadOperationForDisk() {
+    checkUpdate();
+    return lastMergedReadCountForDisk;
   }
 
   @Override
   public Map<String, Double> getQueueSizeForDisk() {
     checkUpdate();
-    Map<String, Double> result = new HashMap<>();
-    diskStores.forEach(
-        disk -> {
-          result.put(this.getDisplayName(disk), (double) 
disk.getCurrentQueueLength());
-        });
-    return result;
+    return lastQueueSizeForDisk;
   }
 
   @Override
   public double getActualReadDataSizeForProcess() {
-    return thisProcess.getBytesRead() / BYTES_PER_KB;
+    checkUpdate();
+    return lastReallyReadSizeForProcess / BYTES_PER_KB;
   }
 
   @Override
   public double getActualWriteDataSizeForProcess() {
-    return thisProcess.getBytesWritten() / BYTES_PER_KB;
+    checkUpdate();
+    return lastReallyWriteSizeForProcess / BYTES_PER_KB;
   }
 
   @Override
-  public Map<String, Double> getAvgSizeOfEachReadForDisk() {
+  public long getReadOpsCountForProcess() {
     checkUpdate();
-    Map<String, Double> result = new 
HashMap<>(incrementReadSizeForDisk.size());
-    for (Map.Entry<String, Long> incrementReadSize : 
incrementReadSizeForDisk.entrySet()) {
-      // use Long.max to avoid NaN
-      long readOpsCount =
-          Long.max(
-              
incrementReadOperationCountForDisk.getOrDefault(incrementReadSize.getKey(), 
1L), 1L);
-      result.put(
-          incrementReadSize.getKey(), ((double) incrementReadSize.getValue()) 
/ readOpsCount);
-    }
-    return result;
+    return lastReadOpsCountForProcess;
   }
 
   @Override
-  public Map<String, Double> getAvgSizeOfEachWriteForDisk() {
+  public long getWriteOpsCountForProcess() {
+    checkUpdate();
+    return lastWriteOpsCountForProcess;
+  }
+
+  @Override
+  public double getAttemptReadSizeForProcess() {
+    checkUpdate();
+    return lastAttemptReadSizeForProcess / BYTES_PER_KB;
+  }
+
+  @Override
+  public double getAttemptWriteSizeForProcess() {
+    checkUpdate();
+    return lastAttemptWriteSizeForProcess / BYTES_PER_KB;
+  }
+
+  @Override
+  public Set<String> getDiskIds() {
     checkUpdate();
-    Map<String, Double> result = new 
HashMap<>(incrementWriteSizeForDisk.size());
-    for (Map.Entry<String, Long> incrementReadSize : 
incrementWriteSizeForDisk.entrySet()) {
-      // use Long.max to avoid NaN
-      long readOpsCount =
-          Long.max(
-              
incrementWriteOperationCountForDisk.getOrDefault(incrementReadSize.getKey(), 
1L), 1L);
-      result.put(
-          incrementReadSize.getKey(), ((double) incrementReadSize.getValue()) 
/ readOpsCount);
+    return diskIdSet;
+  }
+
+  private void collectDiskId() {
+    Map<String, String[]> diskInfoMap = queryDiskInfo();
+    if (diskInfoMap.isEmpty()) {
+      return;
+    }
+    diskIdSet.clear();
+    diskIdSet.addAll(diskInfoMap.keySet());
+  }
+
+  private Map<String, Double> toKbMap(Map<String, Long> source) {
+    Map<String, Double> result = new HashMap<>(source.size());
+    for (Map.Entry<String, Long> entry : source.entrySet()) {
+      result.put(entry.getKey(), entry.getValue() / BYTES_PER_KB);
     }
     return result;
   }
 
-  protected void updateInfo() {
-    super.updateInfo();
+  private void updateInfo() {
+    long currentTime = System.currentTimeMillis();
+    updateInterval = lastUpdateTime == 0L ? 0L : currentTime - lastUpdateTime;
+    lastUpdateTime = currentTime;
     updateDiskInfo();
+    updateProcessInfo();
   }
 
   private void updateDiskInfo() {
-    diskStores = systemInfo.getHardware().getDiskStores();
+    Map<String, String[]> diskInfoMap = queryDiskInfo();
+    if (diskInfoMap.isEmpty()) {
+      return;
+    }
+
+    diskIdSet.clear();
+    diskIdSet.addAll(diskInfoMap.keySet());
+
+    for (Map.Entry<String, String[]> entry : diskInfoMap.entrySet()) {
+      String diskId = entry.getKey();
+      String[] diskInfo = entry.getValue();
+      long readOpsPerSec = parseLong(diskInfo[0]);
+      long writeOpsPerSec = parseLong(diskInfo[1]);
+      long readBytesPerSec = parseLong(diskInfo[2]);
+      long writeBytesPerSec = parseLong(diskInfo[3]);
+      double avgDiskSecPerRead = parseDouble(diskInfo[4]);
+      double avgDiskSecPerWrite = parseDouble(diskInfo[5]);
+      double percentIdleTime = parseDouble(diskInfo[6]);
+      double avgDiskQueueLength = parseDouble(diskInfo[7]);
 
-    Map[] currentMapArray = {
-      getTransferTimesForDisk(), getReadDataSizeForDisk(), 
getWriteDataSizeForDisk(),
-    };
-    Map[] lastMapArray = {
-      lastIoBusyTimeForDisk, lastReadSizeForDisk, lastWriteSizeForDisk,
-    };
-    Map[] incrementMapArray = {
-      incrementIoBusyTimeForDisk, incrementReadSizeForDisk, 
incrementWriteSizeForDisk,
-    };
+      long intervalMillis = updateInterval;
+      lastReadOperationCountForDisk.put(
+          diskId,
+          accumulate(lastReadOperationCountForDisk.get(diskId), readOpsPerSec, 
intervalMillis));
+      lastWriteOperationCountForDisk.put(
+          diskId,
+          accumulate(lastWriteOperationCountForDisk.get(diskId), 
writeOpsPerSec, intervalMillis));
+      lastMergedReadCountForDisk.put(diskId, 0L);
+      lastMergedWriteCountForDisk.put(diskId, 0L);
+      lastReadSizeForDisk.put(
+          diskId, accumulate(lastReadSizeForDisk.get(diskId), readBytesPerSec, 
intervalMillis));
+      lastWriteSizeForDisk.put(
+          diskId, accumulate(lastWriteSizeForDisk.get(diskId), 
writeBytesPerSec, intervalMillis));
+      lastReadTimeCostForDisk.put(
+          diskId,
+          accumulateTimeCost(
+              lastReadTimeCostForDisk.get(diskId),
+              avgDiskSecPerRead,
+              readOpsPerSec,
+              intervalMillis));
+      lastWriteTimeCostForDisk.put(
+          diskId,
+          accumulateTimeCost(
+              lastWriteTimeCostForDisk.get(diskId),
+              avgDiskSecPerWrite,
+              writeOpsPerSec,
+              intervalMillis));
+      lastIoUtilsPercentageForDisk.put(diskId, clampPercentage(1.0 - 
percentIdleTime / 100.0));
+      lastQueueSizeForDisk.put(diskId, avgDiskQueueLength);
+      lastAvgReadCostTimeOfEachOpsForDisk.put(diskId, avgDiskSecPerRead * 
1000.0);
+      lastAvgWriteCostTimeOfEachOpsForDisk.put(diskId, avgDiskSecPerWrite * 
1000.0);
+      lastAvgSizeOfEachReadForDisk.put(
+          diskId, readOpsPerSec == 0 ? 0.0 : ((double) readBytesPerSec) / 
readOpsPerSec);
+      lastAvgSizeOfEachWriteForDisk.put(
+          diskId, writeOpsPerSec == 0 ? 0.0 : ((double) writeBytesPerSec) / 
writeOpsPerSec);
+    }
+  }
+
+  private void updateProcessInfo() {
+    String processInfo = queryProcessInfo();
+    if (processInfo == null || processInfo.isEmpty()) {
+      return;
+    }
 
-    for (int i = 0; i < currentMapArray.length; i++) {
-      Map map = currentMapArray[i];
-      int finalI = i;
-      map.forEach(
-          (key, value) -> {
-            updateSingleDiskInfo(
-                (String) key,
-                ((Number) value).longValue(),
-                lastMapArray[finalI],
-                incrementMapArray[finalI]);
-          });
+    String[] processMetricArray = processInfo.split("\t");
+    if (processMetricArray.length < 4) {
+      LOGGER.warn("Unexpected windows process io info format: {}", 
processInfo);
+      return;
     }
+
+    long readOpsPerSec = parseLong(processMetricArray[0]);
+    long writeOpsPerSec = parseLong(processMetricArray[1]);
+    long readBytesPerSec = parseLong(processMetricArray[2]);
+    long writeBytesPerSec = parseLong(processMetricArray[3]);
+
+    lastReadOpsCountForProcess =
+        accumulate(lastReadOpsCountForProcess, readOpsPerSec, updateInterval);
+    lastWriteOpsCountForProcess =
+        accumulate(lastWriteOpsCountForProcess, writeOpsPerSec, 
updateInterval);
+    lastReallyReadSizeForProcess =
+        accumulate(lastReallyReadSizeForProcess, readBytesPerSec, 
updateInterval);
+    lastReallyWriteSizeForProcess =
+        accumulate(lastReallyWriteSizeForProcess, writeBytesPerSec, 
updateInterval);
+
+    // Windows does not expose attempted read/write sizes directly in these 
counters.
+    lastAttemptReadSizeForProcess = lastReallyReadSizeForProcess;
+    lastAttemptWriteSizeForProcess = lastReallyWriteSizeForProcess;
   }
 
-  private String getDisplayName(HWDiskStore disk) {
-    return disk.getName() + "-" + disk.getModel();
+  private Map<String, String[]> queryDiskInfo() {
+    Map<String, String[]> result = new HashMap<>();
+    for (String line : executePowerShell(DISK_QUERY)) {
+      if (line == null || line.isEmpty()) {
+        continue;
+      }
+      String[] values = line.split("\t");
+      if (values.length < 9) {
+        LOGGER.warn("Unexpected windows disk io info format: {}", line);
+        continue;
+      }
+      String diskId = values[0].trim();
+      if (diskId.isEmpty() || TOTAL_DISK_INSTANCE.equals(diskId)) {
+        continue;
+      }
+      String[] metricArray = new String[8];
+      System.arraycopy(values, 1, metricArray, 0, metricArray.length);
+      result.put(diskId, metricArray);
+    }
+    return result;
   }
 
-  @Override
-  protected void collectDiskId() {
-    diskIdSet = 
diskStores.stream().map(this::getDisplayName).collect(Collectors.toSet());
+  private String queryProcessInfo() {
+    for (String line :
+        executePowerShell(
+            String.format(PROCESS_QUERY_TEMPLATE, 
escapeSingleQuotedPowerShell(processId)))) {
+      if (line != null && !line.isEmpty()) {
+        return line;
+      }
+    }
+    return null;
+  }
+
+  private String escapeSingleQuotedPowerShell(String value) {
+    return value.replace("'", "''");
+  }
+
+  private long accumulate(Long previousValue, long valuePerSec, long 
intervalMillis) {
+    if (intervalMillis <= 0L) {
+      return previousValue == null ? 0L : previousValue;
+    }
+    return (previousValue == null ? 0L : previousValue) + valuePerSec * 
intervalMillis / 1000L;
+  }
+
+  private long accumulate(long previousValue, long valuePerSec, long 
intervalMillis) {
+    if (intervalMillis <= 0L) {
+      return previousValue;
+    }
+    return previousValue + valuePerSec * intervalMillis / 1000L;
+  }
+
+  private long accumulateTimeCost(
+      Long previousValue, double avgTimeInSecond, long opsPerSec, long 
intervalMillis) {
+    if (intervalMillis <= 0L) {
+      return previousValue == null ? 0L : previousValue;
+    }
+    long previous = previousValue == null ? 0L : previousValue;
+    double operationCount = opsPerSec * intervalMillis / 1000.0;
+    return previous + Math.round(avgTimeInSecond * operationCount * 1000.0);
+  }
+
+  private long parseLong(String value) {
+    try {
+      return Math.round(Double.parseDouble(value.trim()));
+    } catch (NumberFormatException e) {
+      LOGGER.warn("Failed to parse long value from windows disk metrics: {}", 
value, e);
+      return 0L;
+    }
+  }
+
+  private double parseDouble(String value) {
+    try {
+      return Double.parseDouble(value.trim());
+    } catch (NumberFormatException e) {
+      LOGGER.warn("Failed to parse double value from windows disk metrics: 
{}", value, e);
+      return 0.0;
+    }
+  }
+
+  private double clampPercentage(double value) {
+    return Math.max(0.0, Math.min(100.0, value));
+  }
+
+  private List<String> executePowerShell(String command) {
+    List<String> result = new ArrayList<>();
+    List<String> rawOutput = new ArrayList<>();
+    Process process = null;
+    try {
+      process =
+          new ProcessBuilder(POWER_SHELL, POWER_SHELL_NO_PROFILE, 
POWER_SHELL_COMMAND, command)
+              .redirectErrorStream(true)
+              .start();
+      try (BufferedReader reader =
+          new BufferedReader(
+              new InputStreamReader(process.getInputStream(), 
WINDOWS_SHELL_CHARSET))) {
+        String line;
+        while ((line = reader.readLine()) != null) {
+          String trimmedLine = line.trim();
+          if (!trimmedLine.isEmpty()) {
+            rawOutput.add(trimmedLine);
+          }
+        }
+      }
+      int exitCode = process.waitFor();
+      if (exitCode != 0) {
+        LOGGER.warn(
+            "Failed to collect windows disk metrics, powershell exit code: {}, 
command {}, output {}",
+            exitCode,
+            command,
+            String.join(" | ", rawOutput));
+      } else {
+        result.addAll(rawOutput);
+      }
+    } catch (IOException e) {
+      LOGGER.warn("Failed to execute powershell for windows disk metrics", e);
+    } catch (InterruptedException e) {
+      Thread.currentThread().interrupt();
+      LOGGER.warn("Interrupted while collecting windows disk metrics", e);
+    } finally {
+      if (process != null) {
+        process.destroy();
+      }
+    }
+    return result;
+  }
+
+  private static Charset getWindowsShellCharset() {
+    String nativeEncoding = System.getProperty("sun.jnu.encoding");
+    if (nativeEncoding != null && Charset.isSupported(nativeEncoding)) {
+      return Charset.forName(nativeEncoding);
+    }
+
+    String fileEncoding = System.getProperty("file.encoding");
+    if (fileEncoding != null && Charset.isSupported(fileEncoding)) {
+      return Charset.forName(fileEncoding);
+    }
+
+    if (Charset.isSupported("GBK")) {
+      return Charset.forName("GBK");
+    }
+    return Charset.defaultCharset();
+  }
+
+  private void checkUpdate() {
+    if (System.currentTimeMillis() - lastUpdateTime > 
UPDATE_SMALLEST_INTERVAL) {
+      updateInfo();
+    }
   }
 }

Reply via email to