chia7712 commented on code in PR #20087:
URL: https://github.com/apache/kafka/pull/20087#discussion_r2190850400
##########
storage/src/main/java/org/apache/kafka/storage/internals/log/LazyIndex.java:
##########
@@ -100,13 +100,7 @@ public void closeHandler() {
         }
     }
 
-    private static class IndexValue<T extends AbstractIndex> implements IndexWrapper {
-
-        private final T index;
-
-        IndexValue(T index) {
-            this.index = index;
-        }
+    private record IndexValue<T extends AbstractIndex>(T index) implements IndexWrapper {

Review Comment:
   There was a discussion before about which variables are "suitable" for a record class. I prefer to use a record class only if all of its fields are immutable, since that keeps `hashCode` and `equals` consistent. What do you think? This is a matter of personal taste, so any feedback is welcome.
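   For illustration, a minimal sketch of the `hashCode`/`equals` consistency concern; `MutableCounter` and `Holder` are hypothetical types, not from this PR:

```java
import java.util.HashSet;
import java.util.Set;

// Hypothetical types for illustration only (not part of the Kafka code base).
public class RecordMutabilityDemo {

    // A mutable component whose hashCode/equals depend on mutable state.
    static class MutableCounter {
        int value;

        @Override
        public boolean equals(Object o) {
            return o instanceof MutableCounter other && other.value == value;
        }

        @Override
        public int hashCode() {
            return Integer.hashCode(value);
        }
    }

    // The generated equals/hashCode of this record delegate to the mutable component.
    record Holder(MutableCounter counter) { }

    public static void main(String[] args) {
        MutableCounter counter = new MutableCounter();
        Holder holder = new Holder(counter);

        Set<Holder> set = new HashSet<>();
        set.add(holder);

        counter.value = 42;  // mutation after insertion changes holder.hashCode()

        // The set can no longer find the very object it contains.
        System.out.println(set.contains(holder));  // typically prints false
    }
}
```

   With components that are all immutable, such as the primitive fields of `CompletedTxn` below, this drift cannot happen.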
##########
storage/src/main/java/org/apache/kafka/storage/internals/log/CompletedTxn.java:
##########
@@ -19,50 +19,14 @@
 /**
  * A class used to hold useful metadata about a completed transaction. This is used to build
  * the transaction index after appending to the log.
+ *
+ * @param producerId The ID of the producer
+ * @param firstOffset The first offset (inclusive) of the transaction
+ * @param lastOffset The last offset (inclusive) of the transaction. This is always the offset of the
+ *                   COMMIT/ABORT control record which indicates the transaction's completion.
+ * @param isAborted Whether the transaction was aborted
  */
-public class CompletedTxn {
-    public final long producerId;
-    public final long firstOffset;
-    public final long lastOffset;
-    public final boolean isAborted;
-
-    /**
-     * Create an instance of this class.
-     *
-     * @param producerId The ID of the producer
-     * @param firstOffset The first offset (inclusive) of the transaction
-     * @param lastOffset The last offset (inclusive) of the transaction. This is always the offset of the
-     *                   COMMIT/ABORT control record which indicates the transaction's completion.
-     * @param isAborted Whether the transaction was aborted
-     */
-    public CompletedTxn(long producerId, long firstOffset, long lastOffset, boolean isAborted) {
-        this.producerId = producerId;
-        this.firstOffset = firstOffset;
-        this.lastOffset = lastOffset;
-        this.isAborted = isAborted;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        CompletedTxn that = (CompletedTxn) o;
-
-        return producerId == that.producerId
-            && firstOffset == that.firstOffset
-            && lastOffset == that.lastOffset
-            && isAborted == that.isAborted;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = Long.hashCode(producerId);
-        result = 31 * result + Long.hashCode(firstOffset);
-        result = 31 * result + Long.hashCode(lastOffset);
-        result = 31 * result + Boolean.hashCode(isAborted);
-        return result;
-    }
+public record CompletedTxn(long producerId, long firstOffset, long lastOffset, boolean isAborted) {
 
     @Override
     public String toString() {

Review Comment:
   ditto

##########
storage/src/main/java/org/apache/kafka/server/log/remote/quota/RLMQuotaManagerConfig.java:
##########
@@ -16,38 +16,16 @@
  */
 package org.apache.kafka.server.log.remote.quota;
 
-public class RLMQuotaManagerConfig {
+/**
+ * Configuration settings for quota management
+ *
+ * @param quotaBytesPerSecond The quota in bytes per second
+ * @param numQuotaSamples The number of samples to retain in memory
+ * @param quotaWindowSizeSeconds The time span of each sample
+ */
+public record RLMQuotaManagerConfig(long quotaBytesPerSecond, int numQuotaSamples, int quotaWindowSizeSeconds) {
     public static final int INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS = 3600;
 
-    private final long quotaBytesPerSecond;
-    private final int numQuotaSamples;
-    private final int quotaWindowSizeSeconds;
-
-    /**
-     * Configuration settings for quota management
-     *
-     * @param quotaBytesPerSecond The quota in bytes per second
-     * @param numQuotaSamples The number of samples to retain in memory
-     * @param quotaWindowSizeSeconds The time span of each sample
-     */
-    public RLMQuotaManagerConfig(long quotaBytesPerSecond, int numQuotaSamples, int quotaWindowSizeSeconds) {
-        this.quotaBytesPerSecond = quotaBytesPerSecond;
-        this.numQuotaSamples = numQuotaSamples;
-        this.quotaWindowSizeSeconds = quotaWindowSizeSeconds;
-    }
-
-    public long quotaBytesPerSecond() {
-        return quotaBytesPerSecond;
-    }
-
-    public int numQuotaSamples() {
-        return numQuotaSamples;
-    }
-
-    public int quotaWindowSizeSeconds() {
-        return quotaWindowSizeSeconds;
-    }
-
     @Override
     public String toString() {

Review Comment:
   the generated `toString` should be good enough, I think

##########
storage/src/main/java/org/apache/kafka/storage/internals/checkpoint/CleanShutdownFileHandler.java:
##########
@@ -94,7 +94,7 @@ public OptionalLong read() {
             Content content = Json.parseStringAs(text, Content.class);
             return OptionalLong.of(content.brokerEpoch);
         } catch (Exception e) {
-            logger.debug("Fail to read the clean shutdown file in " + cleanShutdownFile.toPath() + ":" + e);
+            logger.debug("Fail to read the clean shutdown file in {}:{}", cleanShutdownFile.toPath(), e);

Review Comment:
   SLF4J handles the last parameter specially when it is an exception object, so the stack trace gets logged. Perhaps we could use `{}` instead of `{}:{}`?
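   For illustration, a minimal sketch of the suggested call, assuming SLF4J and hypothetical stand-ins for the handler's field and the caught exception; a trailing `Throwable` without a matching placeholder is logged together with its stack trace:

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.File;

public class CleanShutdownLoggingSketch {

    private static final Logger logger = LoggerFactory.getLogger(CleanShutdownLoggingSketch.class);

    // Hypothetical stand-ins for the handler's cleanShutdownFile field and the caught exception.
    static void logReadFailure(File cleanShutdownFile, Exception e) {
        // One placeholder for the path; the exception has no placeholder, so SLF4J
        // treats it as the Throwable and appends its stack trace to the log entry.
        logger.debug("Fail to read the clean shutdown file in {}", cleanShutdownFile.toPath(), e);
    }
}
```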
##########
storage/src/main/java/org/apache/kafka/storage/internals/log/BatchMetadata.java:
##########
@@ -44,28 +28,6 @@ public long firstOffset() {
         return lastOffset - offsetDelta;
     }
 
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        BatchMetadata that = (BatchMetadata) o;
-
-        return lastSeq == that.lastSeq &&
-                lastOffset == that.lastOffset &&
-                offsetDelta == that.offsetDelta &&
-                timestamp == that.timestamp;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = lastSeq;
-        result = 31 * result + Long.hashCode(lastOffset);
-        result = 31 * result + offsetDelta;
-        result = 31 * result + Long.hashCode(timestamp);
-        return result;
-    }
-
     @Override
     public String toString() {

Review Comment:
   ditto

--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org