This is an automated email from the ASF dual-hosted git repository.
epugh pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git
The following commit(s) were added to refs/heads/main by this push:
new 024f9737178 SOLR-17579: Refactoring suggested by IDE in ReplicationHandler and tests (#2893)
024f9737178 is described below
commit 024f9737178624065d0ceada4d3b9fd5aac80341
Author: Eric Pugh <[email protected]>
AuthorDate: Sat Dec 14 06:35:57 2024 -0500
SOLR-17579: Refactoring suggested by IDE in ReplicationHandler and tests (#2893)
* Refactoring suggested by IDE.
* Typos, some dead methods and dead variables.
---
solr/CHANGES.txt | 3 +-
.../java/org/apache/solr/handler/IndexFetcher.java | 57 +++++++---------------
.../apache/solr/handler/ReplicationHandler.java | 35 ++++---------
.../solr/handler/TestReplicationHandler.java | 20 ++++----
.../solr/handler/TestReplicationHandlerBackup.java | 2 +-
5 files changed, 40 insertions(+), 77 deletions(-)
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 2eefba28dbb..a5f6c24d1d1 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -164,7 +164,8 @@ Dependency Upgrades
Other Changes
---------------------
-(No changes)
+* SOLR-17579: Remove unused code and other refactorings in ReplicationHandler and tests. Removed unused public
+  LOCAL_ACTIVITY_DURING_REPLICATION variable. (Eric Pugh)
================== 9.8.0 ==================
New Features
diff --git a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
index a933bb6e1d6..78244fdaedf 100644
--- a/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
+++ b/solr/core/src/java/org/apache/solr/handler/IndexFetcher.java
@@ -187,8 +187,6 @@ public class IndexFetcher {
private Integer soTimeout;
- private boolean downloadTlogFiles = false;
-
private boolean skipCommitOnLeaderVersionZero = true;
private boolean clearLocalIndexFirst = false;
@@ -209,7 +207,7 @@ public class IndexFetcher {
new IndexFetchResult("Local index commit is already in sync with
peer", true, null);
public static final IndexFetchResult INDEX_FETCH_FAILURE =
- new IndexFetchResult("Fetching lastest index is failed", false, null);
+ new IndexFetchResult("Fetching latest index is failed", false, null);
public static final IndexFetchResult INDEX_FETCH_SUCCESS =
new IndexFetchResult("Fetching latest index is successful", true,
null);
public static final IndexFetchResult LOCK_OBTAIN_FAILED =
@@ -224,8 +222,6 @@ public class IndexFetcher {
public static final IndexFetchResult PEER_INDEX_COMMIT_DELETED =
new IndexFetchResult(
"No files to download because IndexCommit in peer was deleted",
false, null);
- public static final IndexFetchResult LOCAL_ACTIVITY_DURING_REPLICATION =
- new IndexFetchResult("Local index modification during replication", false, null);
public static final IndexFetchResult EXPECTING_NON_LEADER =
new IndexFetchResult("Replicating from leader but I'm the shard
leader", false, null);
public static final IndexFetchResult LEADER_IS_NOT_ACTIVE =
@@ -402,7 +398,7 @@ public class IndexFetcher {
}
/**
- * This command downloads all the necessary files from leader to install a index commit point.
+ * This command downloads all the necessary files from leader to install an index commit point.
* Only changed files are downloaded. It also downloads the conf files (if they are modified).
*
* @param forceReplication force a replication in all cases
@@ -670,7 +666,7 @@ public class IndexFetcher {
latestGeneration);
final long timeTakenSeconds = getReplicationTimeElapsed();
final Long bytesDownloadedPerSecond =
- (timeTakenSeconds != 0 ? Long.valueOf(bytesDownloaded / timeTakenSeconds) : null);
+ (timeTakenSeconds != 0 ? bytesDownloaded / timeTakenSeconds : null);
log.info(
"Total time taken for download (fullCopy={},bytesDownloaded={}) : {} secs ({} bytes/sec) to {}",
isFullCopyNeeded,
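For context, the replaced expression keeps the divide-by-zero guard and simply lets auto-boxing stand in for the explicit Long.valueOf() call. A minimal sketch of the same pattern, with illustrative names not taken from the patch:

    // Download rate in bytes/sec, or null when no whole second has elapsed,
    // letting callers distinguish "rate unknown" from a genuine zero rate.
    static Long bytesPerSecond(long bytesDownloaded, long timeTakenSeconds) {
      return timeTakenSeconds != 0 ? bytesDownloaded / timeTakenSeconds : null;
    }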
@@ -798,8 +794,7 @@ public class IndexFetcher {
Directory indexDir,
boolean deleteTmpIdxDir,
File tmpTlogDir,
- boolean successfulInstall)
- throws IOException {
+ boolean successfulInstall) {
try {
if (!successfulInstall) {
try {
@@ -847,7 +842,9 @@ public class IndexFetcher {
log.error("Error releasing indexDir", e);
}
try {
- if (tmpTlogDir != null) delTree(tmpTlogDir);
+ if (tmpTlogDir != null) {
+ delTree(tmpTlogDir);
+ }
} catch (Exception e) {
log.error("Error deleting tmpTlogDir", e);
}
@@ -1368,7 +1365,7 @@ public class IndexFetcher {
* All the files which are common between leader and follower must have same size and same
* checksum else we assume they are not compatible (stale).
*
- * @return true if the index stale and we need to download a fresh copy, false otherwise.
+ * @return true if the index is stale, and we need to download a fresh copy, false otherwise.
* @throws IOException if low level io error
*/
private boolean isIndexStale(Directory dir) throws IOException {
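The javadoc above states the staleness rule: every file common to leader and follower must match in both size and checksum, otherwise the local copy is treated as stale. A sketch of that rule in isolation (the parameter names are illustrative, not fields of IndexFetcher):

    // A shared file is compatible only when length and checksum both match;
    // any mismatch marks the whole local index as stale.
    static boolean isFileStale(
        long localSize, long leaderSize, long localChecksum, long leaderChecksum) {
      return localSize != leaderSize || localChecksum != leaderChecksum;
    }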
@@ -1516,7 +1513,7 @@ public class IndexFetcher {
/**
* The tlog files are moved from the tmp dir to the tlog dir as an atomic filesystem operation. A
* backup of the old directory is maintained. If the directory move fails, it will try to revert
- * back the original tlog directory.
+ * the original tlog directory.
*/
private boolean copyTmpTlogFiles2Tlog(File tmpTlogDir) {
Path tlogDir =
@@ -1540,11 +1537,11 @@ public class IndexFetcher {
} catch (IOException e) {
log.error("Unable to rename: {} to: {}", src, tlogDir, e);
- // In case of error, try to revert back the original tlog directory
+ // In case of error, try to revert the original tlog directory
try {
Files.move(backupTlogDir, tlogDir, StandardCopyOption.ATOMIC_MOVE);
} catch (IOException e2) {
- // bad, we were not able to revert back the original tlog directory
+ // bad, we were not able to revert the original tlog directory
throw new SolrException(
SolrException.ErrorCode.SERVER_ERROR,
"Unable to rename: " + backupTlogDir + " to: " + tlogDir);
@@ -1598,23 +1595,6 @@ public class IndexFetcher {
return nameVsFile.isEmpty() ? Collections.emptyList() : nameVsFile.values();
}
- /**
- * This simulates File.delete exception-wise, since this class has some strange behavior with it.
- * The only difference is it returns null on success, throws SecurityException on
- * SecurityException, otherwise returns Throwable preventing deletion (instead of false), for
- * additional information.
- */
- static Throwable delete(File file) {
- try {
- Files.delete(file.toPath());
- return null;
- } catch (SecurityException e) {
- throw e;
- } catch (Throwable other) {
- return other;
- }
- }
-
static boolean delTree(File dir) {
try {
org.apache.lucene.util.IOUtils.rm(dir.toPath());
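The deleted delete(File) helper existed only to surface the Throwable that prevented deletion instead of File.delete's bare false. With it gone, the surviving delTree path above relies on Lucene's IOUtils.rm, and a caller needing a single-file delete can use the JDK directly. A sketch, not code from the patch:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;

    static void deleteLoudly(Path path) throws IOException {
      // Unlike File.delete(), Files.delete() throws a descriptive
      // IOException on failure, so no Throwable-returning wrapper is needed.
      Files.delete(path);
    }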
@@ -1730,8 +1710,7 @@ public class IndexFetcher {
Map<String, Object> fileDetails,
String saveAs,
String solrParamOutput,
- long latestGen)
- throws IOException {
+ long latestGen) {
this.file = file;
this.fileName = (String) fileDetails.get(NAME);
this.size = (Long) fileDetails.get(SIZE);
@@ -1781,7 +1760,7 @@ public class IndexFetcher {
}
} finally {
cleanup();
- // if cleanup succeeds . The file is downloaded fully. do an fsync
+ // if cleanup succeeds, and the file is downloaded fully, then do a fsync.
fsyncService.execute(
() -> {
try {
@@ -1885,8 +1864,8 @@ public class IndexFetcher {
}
/**
- * The webcontainer flushes the data only after it fills the buffer size. So, all data has to be
- * read as readFully() other wise it fails. So read everything as bytes and then extract an
+ * The web container flushes the data only after it fills the buffer size. So, all data has to
+ * be read as readFully() otherwise it fails. So read everything as bytes and then extract an
* integer out of it
*/
private int readInt(byte[] b) {
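The comment above explains the constraint: the web container only flushes once its buffer fills, so the stream must be consumed fully into a byte array before any integer is extracted from it. The patch does not show readInt's body; a plausible sketch that assembles a big-endian int from the first four bytes would be:

    // Build a 32-bit big-endian integer from the first four bytes of b.
    static int readInt(byte[] b) {
      return ((b[0] & 0xFF) << 24)
          | ((b[1] & 0xFF) << 16)
          | ((b[2] & 0xFF) << 8)
          | (b[3] & 0xFF);
    }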
@@ -1953,7 +1932,7 @@ public class IndexFetcher {
}
// wt=filestream this is a custom protocol
params.set(CommonParams.WT, FILE_STREAM);
- // This happen if there is a failure there is a retry. the offset=<sizedownloaded> ensures
+ // This happens if there is a failure there is a retry. the offset=<sizedownloaded> ensures
// that the server starts from the offset
if (bytesDownloaded > 0) {
params.set(OFFSET, Long.toString(bytesDownloaded));
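The retry protocol sketched in that comment is simple: when a download fails partway, the next request carries the number of bytes already written so the server resumes from that offset. Illustratively, against Solr's ModifiableSolrParams (the string literal stands in for the OFFSET constant used in the real code):

    import org.apache.solr.common.params.ModifiableSolrParams;

    static void addResumeOffset(ModifiableSolrParams params, long bytesDownloaded) {
      if (bytesDownloaded > 0) {
        // Ask the server to start streaming after the bytes we already hold.
        params.set("offset", Long.toString(bytesDownloaded));
      }
    }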
@@ -2046,16 +2025,14 @@ public class IndexFetcher {
}
private static class LocalFsFile implements FileInterface {
- private File copy2Dir;
FileChannel fileChannel;
private FileOutputStream fileOutputStream;
File file;
LocalFsFile(File dir, String saveAs) throws IOException {
- this.copy2Dir = dir;
- this.file = new File(copy2Dir, saveAs);
+ this.file = new File(dir, saveAs);
File parentDir = this.file.getParentFile();
if (!parentDir.exists()) {
diff --git a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
index d059ad38d53..a72fd4363e8 100644
--- a/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
+++ b/solr/core/src/java/org/apache/solr/handler/ReplicationHandler.java
@@ -42,7 +42,6 @@ import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
@@ -153,8 +152,6 @@ public class ReplicationHandler extends RequestHandlerBase
private static final Logger log =
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
SolrCore core;
- private volatile boolean closed = false;
-
@Override
public Name getPermissionName(AuthorizationContext request) {
return Name.READ_PERM;
@@ -227,8 +224,6 @@ public class ReplicationHandler extends RequestHandlerBase
private volatile long executorStartTime;
- private int numTimesReplicated = 0;
-
private final Map<String, FileInfo> confFileInfoCache = new HashMap<>();
private Long reserveCommitDuration = readIntervalMs("00:00:10");
@@ -323,8 +318,7 @@ public class ReplicationHandler extends RequestHandlerBase
* @see IndexFetcher.LocalFsFileFetcher
* @see IndexFetcher.DirectoryFileFetcher
*/
- private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req)
- throws IOException {
+ private void getFileStream(SolrParams solrParams, SolrQueryResponse rsp, SolrQueryRequest req) {
final CoreReplication coreReplicationAPI = new CoreReplication(core, req, rsp);
String fileName;
String dirType;
@@ -800,14 +794,6 @@ public class ReplicationHandler extends RequestHandlerBase
return nextTime;
}
- int getTimesReplicatedSinceStartup() {
- return numTimesReplicated;
- }
-
- void setTimesReplicatedSinceStartup() {
- numTimesReplicated++;
- }
-
@Override
public Category getCategory() {
return Category.REPLICATION;
@@ -1043,7 +1029,7 @@ public class ReplicationHandler extends RequestHandlerBase
follower.add("replicationStartTime",
replicationStartTimeStamp.toString());
}
long elapsed = fetcher.getReplicationTimeElapsed();
- follower.add("timeElapsed", String.valueOf(elapsed) + "s");
+ follower.add("timeElapsed", elapsed + "s");
if (bytesDownloaded > 0)
estimatedTimeRemaining =
@@ -1108,13 +1094,13 @@ public class ReplicationHandler extends RequestHandlerBase
if (s == null || s.trim().length() == 0) return null;
if (clzz == Date.class) {
try {
- Long l = Long.parseLong(s);
+ long l = Long.parseLong(s);
return new Date(l).toString();
} catch (NumberFormatException e) {
return null;
}
} else if (clzz == List.class) {
- String ss[] = s.split(",");
+ String[] ss = s.split(",");
List<String> l = new ArrayList<>();
for (String s1 : ss) {
l.add(new Date(Long.parseLong(s1)).toString());
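The branch above turns a comma-separated list of epoch-millisecond values into readable date strings; the refactor only swaps the C-style array declaration for the idiomatic Java form. The list case as a standalone sketch:

    import java.util.ArrayList;
    import java.util.Date;
    import java.util.List;

    // "1700000000000,1700000100000" -> two human-readable date strings.
    static List<String> datesFromEpochList(String s) {
      String[] ss = s.split(",");
      List<String> l = new ArrayList<>();
      for (String s1 : ss) {
        l.add(new Date(Long.parseLong(s1)).toString());
      }
      return l;
    }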
@@ -1272,11 +1258,11 @@ public class ReplicationHandler extends RequestHandlerBase
if (enableLeader) {
includeConfFiles = (String) leader.get(CONF_FILES);
if (includeConfFiles != null && includeConfFiles.trim().length() > 0) {
- List<String> files = Arrays.asList(includeConfFiles.split(","));
+ String[] files = includeConfFiles.split(",");
for (String file : files) {
if (file.trim().length() == 0) continue;
String[] strs = file.trim().split(":");
- // if there is an alias add it or it is null
+ // if there is an alias add it, or it is null
confFileNameAlias.add(strs[0], strs.length > 1 ? strs[1] : null);
}
log.info("Replication enabled for following config files: {}",
includeConfFiles);
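The loop above parses the confFiles setting, where each comma-separated entry may carry a colon-separated alias (for example solrconfig.xml:solrconfig_follower.xml), and the refactor drops the Arrays.asList wrapper since a plain array iterates just as well. A sketch of the parse, using a plain Map in place of Solr's NamedList:

    import java.util.HashMap;
    import java.util.Map;

    // Split "a.xml,b.xml:alias" into file -> alias (null when no alias given).
    static Map<String, String> parseConfFiles(String includeConfFiles) {
      Map<String, String> confFileNameAlias = new HashMap<>();
      for (String file : includeConfFiles.split(",")) {
        if (file.trim().isEmpty()) continue;
        String[] strs = file.trim().split(":");
        confFileNameAlias.put(strs[0], strs.length > 1 ? strs[1] : null);
      }
      return confFileNameAlias;
    }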
@@ -1347,7 +1333,7 @@ public class ReplicationHandler extends RequestHandlerBase
}
}
- // ensure the writer is init'd so that we have a list of commit points
+ // ensure the writer is initialized so that we have a list of commit points
RefCounted<IndexWriter> iw =
core.getUpdateHandler().getSolrCoreState().getIndexWriter(core);
iw.decref();
@@ -1532,7 +1518,8 @@ public class ReplicationHandler extends RequestHandlerBase
public static final String FETCH_FROM_LEADER = "fetchFromLeader";
// in case of TLOG replica, if leaderVersion = zero, don't do commit
- // otherwise updates from current tlog won't copied over properly to the new tlog, leading to data
+ // otherwise updates from current tlog won't be copied over properly to the new tlog, leading to
+ // data
// loss
// don't commit on leader version zero for PULL replicas as PULL should only get its index
// state from leader
@@ -1576,8 +1563,6 @@ public class ReplicationHandler extends RequestHandlerBase
public static final String ALIAS = "alias";
- public static final String CONF_CHECKSUM = "confchecksum";
-
public static final String CONF_FILES = "confFiles";
public static final String REPLICATE_AFTER = "replicateAfter";
@@ -1601,7 +1586,7 @@ public class ReplicationHandler extends RequestHandlerBase
/**
* Boolean param for tests that can be specified when using {@link #CMD_FETCH_INDEX} to force the
* current request to block until the fetch is complete. <b>NOTE:</b> This param is not advised
- * for non-test code, since the duration of the fetch for non-trivial indexes will likeley cause
+ * for non-test code, since the duration of the fetch for non-trivial indexes will likely cause
* the request to time out.
*
* @lucene.internal
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
index b30e20f70bf..3096ec10bf4 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandler.java
@@ -304,8 +304,8 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
// check details on the follower a couple of times before & after fetching
for (int i = 0; i < 3; i++) {
NamedList<Object> details = getDetails(followerClient);
- assertNotNull(i + ": " + details);
- assertNotNull(i + ": " + details.toString(), details.get("follower"));
+ assertNotNull(i + ": " + details, details);
+ assertNotNull(i + ": " + details, details.get("follower"));
if (i > 0) {
rQuery(i, "*:*", followerClient);
@@ -459,7 +459,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
index(followerClient, "id", 555, "name", "name = " + 555);
followerClient.commit(true, true);
- // this doc is added to follower so it should show an item w/ that result
+ // this doc is added to follower, so it should show an item w/ that result
assertEquals(1, numFound(rQuery(1, "id:555", followerClient)));
// Let's fetch the index rather than rely on the polling.
@@ -528,7 +528,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
followerJetty.stop();
- // setup an sub directory /foo/ in order to force subdir file replication
+ // set up a subdirectory /foo/ in order to force subdir file replication
File leaderFooDir = new File(leader.getConfDir() + File.separator + "foo");
File leaderBarFile = new File(leaderFooDir, "bar.txt");
assertTrue("could not make dir " + leaderFooDir, leaderFooDir.mkdirs());
@@ -551,7 +551,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
followerQueryRsp = rQuery(1, "*:*", followerClient);
assertVersions(leaderClient, followerClient);
SolrDocument d = ((SolrDocumentList) followerQueryRsp.get("response")).get(0);
- assertEquals("newname = 2000", (String) d.getFieldValue("newname"));
+ assertEquals("newname = 2000", d.getFieldValue("newname"));
assertTrue(followerFooDir.isDirectory());
assertTrue(followerBarFile.exists());
@@ -596,8 +596,8 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
// get docs from leader and check if number is equal to leader
assertEquals(nDocs + 1, numFound(rQuery(nDocs + 1, "*:*", leaderClient)));
- // NOTE: this test is wierd, we want to verify it DOESNT replicate...
- // for now, add a sleep for this.., but the logic is wierd.
+ // NOTE: this test is weird, we want to verify it DOESN'T replicate...
+ // for now, add a sleep for this... but the logic is weird.
Thread.sleep(3000);
// get docs from follower and check if number is not equal to leader; polling is disabled
@@ -1583,7 +1583,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
index(leaderClient, "id", "1", "name", "foo");
- { // second backup w/uncommited doc
+ { // second backup w/uncommitted doc
final String backupName = "empty_backup2";
final GenericSolrRequest req =
new GenericSolrRequest(
@@ -1695,7 +1695,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
return startTime;
}
} catch (SolrException e) {
- // workarround for SOLR-4668
+ // workaround for SOLR-4668
if (500 != e.code()) {
throw e;
} // else server possibly from the core reload in progress...
@@ -1705,7 +1705,7 @@ public class TestReplicationHandler extends SolrTestCaseJ4 {
Thread.sleep(sleepInterval);
}
fail("timed out waiting for collection1 startAt time to exceed: " + min);
- return min; // compilation neccessity
+ return min; // compilation necessity
}
}
diff --git a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java
index 418ce1d6551..032ae0c9c3e 100644
--- a/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java
+++ b/solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java
@@ -238,7 +238,7 @@ public class TestReplicationHandlerBackup extends SolrJettyTestBase {
}
}
- private void testDeleteNamedBackup(String backupNames[]) throws Exception {
+ private void testDeleteNamedBackup(String[] backupNames) throws Exception {
final BackupStatusChecker backupStatus =
new BackupStatusChecker(leaderClient, "/" + DEFAULT_TEST_CORENAME + "/replication");
for (int i = 0; i < 2; i++) {