[jira] [Work logged] (HDFS-16660) Improve Code With Lambda in IPCLoggerChannel class
[ https://issues.apache.org/jira/browse/HDFS-16660?focusedWorklogId=791206&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791206 ] ASF GitHub Bot logged work on HDFS-16660: - Author: ASF GitHub Bot Created on: 15/Jul/22 01:15 Start Date: 15/Jul/22 01:15 Worklog Time Spent: 10m Work Description: goiri commented on code in PR #4561: URL: https://github.com/apache/hadoop/pull/4561#discussion_r921726293 ## hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java: ## @@ -394,46 +352,39 @@ public ListenableFuture sendEdits( ListenableFuture ret = null; try { - ret = singleThreadExecutor.submit(new Callable() { -@Override -public Void call() throws IOException { - throwIfOutOfSync(); - - long rpcSendTimeNanos = System.nanoTime(); - try { -getProxy().journal(createReqInfo(), -segmentTxId, firstTxnId, numTxns, data); - } catch (IOException e) { -QuorumJournalManager.LOG.warn( -"Remote journal " + IPCLoggerChannel.this + " failed to " + -"write txns " + firstTxnId + "-" + (firstTxnId + numTxns - 1) + -". 
Will try to write to this JN again after the next " + -"log roll.", e); -synchronized (IPCLoggerChannel.this) { - outOfSync = true; -} -throw e; - } finally { -long now = System.nanoTime(); -long rpcTime = TimeUnit.MICROSECONDS.convert( -now - rpcSendTimeNanos, TimeUnit.NANOSECONDS); -long endToEndTime = TimeUnit.MICROSECONDS.convert( -now - submitNanos, TimeUnit.NANOSECONDS); -metrics.addWriteEndToEndLatency(endToEndTime); -metrics.addWriteRpcLatency(rpcTime); -if (rpcTime / 1000 > WARN_JOURNAL_MILLIS_THRESHOLD) { - QuorumJournalManager.LOG.warn( - "Took " + (rpcTime / 1000) + "ms to send a batch of " + - numTxns + " edits (" + data.length + " bytes) to " + - "remote journal " + IPCLoggerChannel.this); -} - } + ret = singleThreadExecutor.submit(() -> { +throwIfOutOfSync(); + +long rpcSendTimeNanos = System.nanoTime(); +try { + getProxy().journal(createReqInfo(), segmentTxId, firstTxnId, numTxns, data); +} catch (IOException e) { + QuorumJournalManager.LOG.warn("Remote journal {} failed to write txns {}-{}." + + " Will try to write to this JN again after the next log roll.", + IPCLoggerChannel.this, firstTxnId, (firstTxnId + numTxns - 1), e); synchronized (IPCLoggerChannel.this) { -highestAckedTxId = firstTxnId + numTxns - 1; -lastAckNanos = submitNanos; +outOfSync = true; } - return null; + throw e; +} finally { + long now = System.nanoTime(); + long rpcTime = TimeUnit.MICROSECONDS.convert( + now - rpcSendTimeNanos, TimeUnit.NANOSECONDS); + long endToEndTime = TimeUnit.MICROSECONDS.convert( Review Comment: endToEndTimeMs? 
## hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java: ## @@ -394,46 +352,39 @@ public ListenableFuture sendEdits( ListenableFuture ret = null; try { - ret = singleThreadExecutor.submit(new Callable() { -@Override -public Void call() throws IOException { - throwIfOutOfSync(); - - long rpcSendTimeNanos = System.nanoTime(); - try { -getProxy().journal(createReqInfo(), -segmentTxId, firstTxnId, numTxns, data); - } catch (IOException e) { -QuorumJournalManager.LOG.warn( -"Remote journal " + IPCLoggerChannel.this + " failed to " + -"write txns " + firstTxnId + "-" + (firstTxnId + numTxns - 1) + -". Will try to write to this JN again after the next " + -"log roll.", e); -synchronized (IPCLoggerChannel.this) { - outOfSync = true; -} -throw e; - } finally { -long now = System.nanoTime(); -long rpcTime = TimeUnit.MICROSECONDS.convert( -now - rpcSendTimeNanos, TimeUnit.NANOSECONDS); -long endToEndTime = TimeUnit.MICROSECONDS.convert( -now - submitNanos, TimeUnit.NANOSECONDS); -metrics.addWriteEndToEndLatency(endToEndTime); -metrics.addWriteRpcLatency(rpcTime); -if (rpcTime / 1000 > WARN_JOURNAL_MILLIS_THRESHOLD) { - QuorumJournalManager.LOG.warn( -
[jira] [Work logged] (HDFS-15079) RBF: Client maybe get an unexpected result with network anomaly
[ https://issues.apache.org/jira/browse/HDFS-15079?focusedWorklogId=791199&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791199 ] ASF GitHub Bot logged work on HDFS-15079: - Author: ASF GitHub Bot Created on: 15/Jul/22 00:28 Start Date: 15/Jul/22 00:28 Worklog Time Spent: 10m Work Description: hadoop-yetus commented on PR #4530: URL: https://github.com/apache/hadoop/pull/4530#issuecomment-1185050674 :confetti_ball: **+1 overall** | Vote | Subsystem | Runtime | Logfile | Comment | |::|--:|:|::|:---:| | +0 :ok: | reexec | 1m 1s | | Docker mode activated. | _ Prechecks _ | | +1 :green_heart: | dupname | 0m 0s | | No case conflicting files found. | | +0 :ok: | codespell | 0m 0s | | codespell was not available. | | +0 :ok: | detsecrets | 0m 1s | | detect-secrets was not available. | | +1 :green_heart: | @author | 0m 0s | | The patch does not contain any @author tags. | | +1 :green_heart: | test4tests | 0m 0s | | The patch appears to include 3 new or modified test files. | _ trunk Compile Tests _ | | +0 :ok: | mvndep | 14m 28s | | Maven dependency ordering for branch | | +1 :green_heart: | mvninstall | 28m 23s | | trunk passed | | +1 :green_heart: | compile | 25m 12s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | compile | 22m 29s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | checkstyle | 5m 14s | | trunk passed | | +1 :green_heart: | mvnsite | 6m 7s | | trunk passed | | +1 :green_heart: | javadoc | 5m 27s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 5m 16s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 9m 0s | | trunk passed | | +1 :green_heart: | shadedclient | 24m 53s | | branch has no errors when building and testing our client artifacts. 
| _ Patch Compile Tests _ | | +0 :ok: | mvndep | 0m 26s | | Maven dependency ordering for patch | | +1 :green_heart: | mvninstall | 3m 11s | | the patch passed | | +1 :green_heart: | compile | 24m 11s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javac | 24m 11s | | the patch passed | | +1 :green_heart: | compile | 21m 53s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | javac | 21m 53s | | the patch passed | | +1 :green_heart: | blanks | 0m 0s | | The patch has no blanks issues. | | +1 :green_heart: | checkstyle | 4m 27s | | root: The patch generated 0 new + 245 unchanged - 6 fixed = 245 total (was 251) | | +1 :green_heart: | mvnsite | 5m 19s | | the patch passed | | +1 :green_heart: | javadoc | 4m 7s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 4m 37s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 9m 10s | | the patch passed | | +1 :green_heart: | shadedclient | 25m 8s | | patch has no errors when building and testing our client artifacts. | _ Other Tests _ | | +1 :green_heart: | unit | 18m 20s | | hadoop-common in the patch passed. | | +1 :green_heart: | unit | 382m 1s | | hadoop-hdfs in the patch passed. | | +1 :green_heart: | unit | 34m 58s | | hadoop-hdfs-rbf in the patch passed. | | +1 :green_heart: | asflicense | 1m 41s | | The patch does not generate ASF License warnings. 
| | | | 690m 57s | | | | Subsystem | Report/Notes | |--:|:-| | Docker | ClientAPI=1.41 ServerAPI=1.41 base: https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4530/4/artifact/out/Dockerfile | | GITHUB PR | https://github.com/apache/hadoop/pull/4530 | | Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shadedclient spotbugs checkstyle codespell detsecrets | | uname | Linux 080e9ebce89a 4.15.0-175-generic #184-Ubuntu SMP Thu Mar 24 17:48:36 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux | | Build tool | maven | | Personality | dev-support/bin/hadoop.sh | | git revision | trunk / cc4eaaec28e444d9bc27b6ba732e288019deab23 | | Default Java | Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | Multi-JDK versions | /usr/lib/jvm/java-11-openjdk-amd64:Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 /usr/lib/jvm/java-8-openjdk-amd64:Private Build-1.
[jira] [Work logged] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
[ https://issues.apache.org/jira/browse/HDFS-16661?focusedWorklogId=791184&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791184 ] ASF GitHub Bot logged work on HDFS-16661: - Author: ASF GitHub Bot Created on: 14/Jul/22 23:24 Start Date: 14/Jul/22 23:24 Worklog Time Spent: 10m Work Description: goiri commented on code in PR #4565: URL: https://github.com/apache/hadoop/pull/4565#discussion_r921689045 ## hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java: ## @@ -206,197 +194,114 @@ List getLoggersForTests() { /// public QuorumCall getJournalState() { -Map> calls = -Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.getJournalState()); -} +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.getJournalState())); return QuorumCall.create(calls); } public QuorumCall isFormatted() { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.isFormatted()); -} +loggers.forEach(l -> calls.put(l, l.isFormatted())); return QuorumCall.create(calls); } - public QuorumCall newEpoch( - NamespaceInfo nsInfo, - long epoch) { -Map> calls = -Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.newEpoch(epoch)); -} + public QuorumCall newEpoch(long epoch) { +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.newEpoch(epoch))); return QuorumCall.create(calls); } - public QuorumCall startLogSegment( - long txid, int layoutVersion) { + public QuorumCall startLogSegment(long txid, int layoutVersion) { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.startLogSegment(txid, layoutVersion)); -} +loggers.forEach(l -> calls.put(l, l.startLogSegment(txid, layoutVersion))); return QuorumCall.create(calls); } - public QuorumCall finalizeLogSegment(long firstTxId, - long lastTxId) { + public QuorumCall finalizeLogSegment(long 
firstTxId, long lastTxId) { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.finalizeLogSegment(firstTxId, lastTxId)); -} +loggers.forEach(l -> calls.put(l, l.finalizeLogSegment(firstTxId, lastTxId))); return QuorumCall.create(calls); } public QuorumCall sendEdits( long segmentTxId, long firstTxnId, int numTxns, byte[] data) { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = -logger.sendEdits(segmentTxId, firstTxnId, numTxns, data); - calls.put(logger, future); -} +loggers.forEach(l -> calls.put(l, l.sendEdits(segmentTxId, firstTxnId, numTxns, data))); return QuorumCall.create(calls); } public QuorumCall getJournaledEdits(long fromTxnId, int maxTransactions) { -Map> calls -= Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = - logger.getJournaledEdits(fromTxnId, maxTransactions); - calls.put(logger, future); -} +Map> calls = Maps.newHashMap(); Review Comment: As we are at it, can we get rid of the guava use? We should also set it with the capacity being `loggers.size()` Issue Time Tracking --- Worklog Id: (was: 791184) Time Spent: 50m (was: 40m) > Improve Code With Lambda in AsyncLoggerSet class > > > Key: HDFS-16661 > URL: https://issues.apache.org/jira/browse/HDFS-16661 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: ZanderXu >Assignee: ZanderXu >Priority: Major > Labels: pull-request-available > Time Spent: 50m > Remaining Estimate: 0h > > Improve Code With Lambda in AsyncLoggerSet class -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
[ https://issues.apache.org/jira/browse/HDFS-16661?focusedWorklogId=791182&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791182 ] ASF GitHub Bot logged work on HDFS-16661: - Author: ASF GitHub Bot Created on: 14/Jul/22 23:21 Start Date: 14/Jul/22 23:21 Worklog Time Spent: 10m Work Description: goiri commented on code in PR #4565: URL: https://github.com/apache/hadoop/pull/4565#discussion_r921687765 ## hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java: ## @@ -206,197 +194,114 @@ List getLoggersForTests() { /// public QuorumCall getJournalState() { -Map> calls = -Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.getJournalState()); -} +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.getJournalState())); return QuorumCall.create(calls); } public QuorumCall isFormatted() { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.isFormatted()); -} +loggers.forEach(l -> calls.put(l, l.isFormatted())); return QuorumCall.create(calls); } - public QuorumCall newEpoch( - NamespaceInfo nsInfo, - long epoch) { -Map> calls = -Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.newEpoch(epoch)); -} + public QuorumCall newEpoch(long epoch) { +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.newEpoch(epoch))); return QuorumCall.create(calls); } - public QuorumCall startLogSegment( - long txid, int layoutVersion) { + public QuorumCall startLogSegment(long txid, int layoutVersion) { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.startLogSegment(txid, layoutVersion)); -} +loggers.forEach(l -> calls.put(l, l.startLogSegment(txid, layoutVersion))); return QuorumCall.create(calls); } - public QuorumCall finalizeLogSegment(long firstTxId, - long lastTxId) { + public QuorumCall finalizeLogSegment(long 
firstTxId, long lastTxId) { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.finalizeLogSegment(firstTxId, lastTxId)); -} +loggers.forEach(l -> calls.put(l, l.finalizeLogSegment(firstTxId, lastTxId))); return QuorumCall.create(calls); } public QuorumCall sendEdits( long segmentTxId, long firstTxnId, int numTxns, byte[] data) { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = -logger.sendEdits(segmentTxId, firstTxnId, numTxns, data); - calls.put(logger, future); -} +loggers.forEach(l -> calls.put(l, l.sendEdits(segmentTxId, firstTxnId, numTxns, data))); return QuorumCall.create(calls); } public QuorumCall getJournaledEdits(long fromTxnId, int maxTransactions) { -Map> calls -= Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = - logger.getJournaledEdits(fromTxnId, maxTransactions); - calls.put(logger, future); -} +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.getJournaledEdits(fromTxnId, maxTransactions))); return QuorumCall.create(calls); } public QuorumCall getEditLogManifest( long fromTxnId, boolean inProgressOk) { -Map> calls -= Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = - logger.getEditLogManifest(fromTxnId, inProgressOk); - calls.put(logger, future); -} +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.getEditLogManifest(fromTxnId, inProgressOk))); return QuorumCall.create(calls); } - QuorumCall - prepareRecovery(long segmentTxId) { -Map> calls - = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = - logger.prepareRecovery(segmentTxId); - calls.put(logger, future); -} + QuorumCall prepareRecovery(long segmentTxId) { +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.prepareRecovery(segmentTxId))); return QuorumCall.create(calls); } - QuorumCall - acceptRecovery(SegmentStateProto log, URL fromURL) 
{ -Map> calls - = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - ListenableFuture future = - logger.acceptRecovery(log, fromURL); - calls.put(logger, future); -} + QuorumCall acceptRecovery(SegmentStateProto log, URL fromURL) { Review Comment: QuorumCall accept
[jira] [Work logged] (HDFS-16605) Improve Code With Lambda in hadoop-hdfs-rbf module
[ https://issues.apache.org/jira/browse/HDFS-16605?focusedWorklogId=791181&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791181 ] ASF GitHub Bot logged work on HDFS-16605: - Author: ASF GitHub Bot Created on: 14/Jul/22 23:20 Start Date: 14/Jul/22 23:20 Worklog Time Spent: 10m Work Description: goiri commented on code in PR #4375: URL: https://github.com/apache/hadoop/pull/4375#discussion_r921680900 ## hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java: ## @@ -167,21 +167,18 @@ protected synchronized void startPeriodic() { stopPeriodic(); // Create the runnable service -Runnable updateRunnable = new Runnable() { - @Override - public void run() { -LOG.debug("Running {} update task", serviceName); -try { - if (!isRunning) { -return; - } - periodicInvoke(); - runCount++; - lastRun = Time.now(); -} catch (Exception ex) { - errorCount++; - LOG.warn(serviceName + " service threw an exception", ex); +Runnable updateRunnable = () -> { + LOG.debug("Running {} update task", serviceName); + try { +if (!isRunning) { + return; } +periodicInvoke(); +runCount++; +lastRun = Time.now(); + } catch (Exception ex) { +errorCount++; +LOG.warn(serviceName + " service threw an exception", ex); Review Comment: Use {} ## hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterClientRejectOverload.java: ## @@ -206,33 +206,30 @@ private void testOverloaded(int expOverloadMin, int expOverloadMax, for (int i = 0; i < numOps; i++) { // Stagger the operations a little (50ms) final int sleepTime = i * 50; - Future future = exec.submit(new Runnable() { -@Override -public void run() { - DFSClient routerClient = null; - try { -Thread.sleep(sleepTime); -routerClient = new DFSClient(address, conf); -String clientName = routerClient.getClientName(); -ClientProtocol routerProto = routerClient.getNamenode(); -routerProto.renewLease(clientName, null); - } 
catch (RemoteException re) { -IOException ioe = re.unwrapRemoteException(); -assertTrue("Wrong exception: " + ioe, -ioe instanceof StandbyException); -assertExceptionContains("is overloaded", ioe); -overloadException.incrementAndGet(); - } catch (IOException e) { -fail("Unexpected exception: " + e); - } catch (InterruptedException e) { -fail("Cannot sleep: " + e); - } finally { -if (routerClient != null) { - try { -routerClient.close(); - } catch (IOException e) { -LOG.error("Cannot close the client"); - } + Future future = exec.submit(() -> { +DFSClient routerClient = null; +try { + Thread.sleep(sleepTime); + routerClient = new DFSClient(address, conf); + String clientName = routerClient.getClientName(); + ClientProtocol routerProto = routerClient.getNamenode(); + routerProto.renewLease(clientName, null); +} catch (RemoteException re) { + IOException ioe = re.unwrapRemoteException(); + assertTrue("Wrong exception: " + ioe, Review Comment: Single line ## hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterFaultTolerant.java: ## @@ -168,7 +169,7 @@ public void cleanup() throws Exception { } namenodes.clear(); -routers.forEach(router -> router.stop()); +routers.forEach(AbstractService::stop); Review Comment: Can we do `Router::stop`? Issue Time Tracking --- Worklog Id: (was: 791181) Time Spent: 3h 50m (was: 3h 40m) > Improve Code With Lambda in hadoop-hdfs-rbf moudle > -- > > Key: HDFS-16605 > URL: https://issues.apache.org/jira/browse/HDFS-16605 > Project: Hadoop HDFS > Issue Type: Improvement > Components: rbf >Affects Versions: 3.4.0 >Reporter: fanshilun >Assignee: fanshilun >Priority: Minor > Labels: pull-request-available > Time Spent: 3h 50m > Remaining Estimate: 0h > -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional
[jira] [Work logged] (HDFS-16605) Improve Code With Lambda in hadoop-hdfs-rbf module
[ https://issues.apache.org/jira/browse/HDFS-16605?focusedWorklogId=791164&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791164 ] ASF GitHub Bot logged work on HDFS-16605: - Author: ASF GitHub Bot Created on: 14/Jul/22 21:46 Start Date: 14/Jul/22 21:46 Worklog Time Spent: 10m Work Description: slfan1989 commented on PR #4375: URL: https://github.com/apache/hadoop/pull/4375#issuecomment-1184923998 @goiri Can you help to merge this pr into trunk branch? thank you very much! Issue Time Tracking --- Worklog Id: (was: 791164) Time Spent: 3h 40m (was: 3.5h) > Improve Code With Lambda in hadoop-hdfs-rbf moudle > -- > > Key: HDFS-16605 > URL: https://issues.apache.org/jira/browse/HDFS-16605 > Project: Hadoop HDFS > Issue Type: Improvement > Components: rbf >Affects Versions: 3.4.0 >Reporter: fanshilun >Assignee: fanshilun >Priority: Minor > Labels: pull-request-available > Time Spent: 3h 40m > Remaining Estimate: 0h > -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
[ https://issues.apache.org/jira/browse/HDFS-16661?focusedWorklogId=791151&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791151 ] ASF GitHub Bot logged work on HDFS-16661: - Author: ASF GitHub Bot Created on: 14/Jul/22 21:15 Start Date: 14/Jul/22 21:15 Worklog Time Spent: 10m Work Description: hadoop-yetus commented on PR #4565: URL: https://github.com/apache/hadoop/pull/4565#issuecomment-1184902970 :broken_heart: **-1 overall** | Vote | Subsystem | Runtime | Logfile | Comment | |::|--:|:|::|:---:| | +0 :ok: | reexec | 0m 40s | | Docker mode activated. | _ Prechecks _ | | +1 :green_heart: | dupname | 0m 0s | | No case conflicting files found. | | +0 :ok: | codespell | 0m 0s | | codespell was not available. | | +0 :ok: | detsecrets | 0m 0s | | detect-secrets was not available. | | +1 :green_heart: | @author | 0m 0s | | The patch does not contain any @author tags. | | -1 :x: | test4tests | 0m 0s | | The patch doesn't appear to include any new or modified tests. Please justify why no new tests are needed for this patch. Also please list what manual steps were performed to verify this patch. | _ trunk Compile Tests _ | | +1 :green_heart: | mvninstall | 37m 32s | | trunk passed | | +1 :green_heart: | compile | 1m 43s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | compile | 1m 37s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | checkstyle | 1m 27s | | trunk passed | | +1 :green_heart: | mvnsite | 1m 44s | | trunk passed | | +1 :green_heart: | javadoc | 1m 26s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 1m 47s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 3m 47s | | trunk passed | | +1 :green_heart: | shadedclient | 23m 13s | | branch has no errors when building and testing our client artifacts. 
| _ Patch Compile Tests _ | | +1 :green_heart: | mvninstall | 1m 24s | | the patch passed | | +1 :green_heart: | compile | 1m 25s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javac | 1m 25s | | the patch passed | | +1 :green_heart: | compile | 1m 23s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | javac | 1m 23s | | the patch passed | | +1 :green_heart: | blanks | 0m 0s | | The patch has no blanks issues. | | -0 :warning: | checkstyle | 1m 3s | [/results-checkstyle-hadoop-hdfs-project_hadoop-hdfs.txt](https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4565/1/artifact/out/results-checkstyle-hadoop-hdfs-project_hadoop-hdfs.txt) | hadoop-hdfs-project/hadoop-hdfs: The patch generated 2 new + 9 unchanged - 6 fixed = 11 total (was 15) | | +1 :green_heart: | mvnsite | 1m 29s | | the patch passed | | +1 :green_heart: | javadoc | 0m 59s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 1m 34s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 3m 19s | | the patch passed | | +1 :green_heart: | shadedclient | 22m 23s | | patch has no errors when building and testing our client artifacts. | _ Other Tests _ | | +1 :green_heart: | unit | 239m 9s | | hadoop-hdfs in the patch passed. | | +1 :green_heart: | asflicense | 1m 15s | | The patch does not generate ASF License warnings. 
| | | | 348m 44s | | | | Subsystem | Report/Notes | |--:|:-| | Docker | ClientAPI=1.41 ServerAPI=1.41 base: https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4565/1/artifact/out/Dockerfile | | GITHUB PR | https://github.com/apache/hadoop/pull/4565 | | Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shadedclient spotbugs checkstyle codespell detsecrets | | uname | Linux 138bf5b13f6c 4.15.0-58-generic #64-Ubuntu SMP Tue Aug 6 11:12:41 UTC 2019 x86_64 x86_64 x86_64 GNU/Linux | | Build tool | maven | | Personality | dev-support/bin/hadoop.sh | | git revision | trunk / 8f38839e6159bf7034ffae7e7ef1d2b424baa73f | | Default Java | Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | Multi-JDK versions | /usr/lib/jvm/java-11-openjdk-amd64:Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 /usr/lib/jvm/java-8-openjdk-amd64:Private Build-1.8
[jira] [Work logged] (HDFS-15079) RBF: Client maybe get an unexpected result with network anomaly
[ https://issues.apache.org/jira/browse/HDFS-15079?focusedWorklogId=791148&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791148 ] ASF GitHub Bot logged work on HDFS-15079: - Author: ASF GitHub Bot Created on: 14/Jul/22 20:50 Start Date: 14/Jul/22 20:50 Worklog Time Spent: 10m Work Description: goiri commented on PR #4530: URL: https://github.com/apache/hadoop/pull/4530#issuecomment-1184885136 It would be nice to get feedback from others. Issue Time Tracking --- Worklog Id: (was: 791148) Time Spent: 1.5h (was: 1h 20m) > RBF: Client maybe get an unexpected result with network anomaly > > > Key: HDFS-15079 > URL: https://issues.apache.org/jira/browse/HDFS-15079 > Project: Hadoop HDFS > Issue Type: Sub-task > Components: rbf >Affects Versions: 3.3.0 >Reporter: Hui Fei >Priority: Critical > Labels: pull-request-available > Attachments: HDFS-15079.001.patch, HDFS-15079.002.patch, > UnexpectedOverWriteUT.patch > > Time Spent: 1.5h > Remaining Estimate: 0h > > I find there is a critical problem on RBF, HDFS-15078 can resolve it on some > Scenarios, but i have no idea about the overall resolution. > The problem is that > Client with RBF(r0, r1) create a file HDFS file via r0, it gets Exception and > failovers to r1 > r0 has been send create rpc to namenode(1st create) > Client create a HDFS file via r1(2nd create) > Client writes the HDFS file and close it finally(3rd close) > Maybe namenode receiving the rpc in order as follow > 2nd create > 3rd close > 1st create > And overwrite is true by default, this would make the file had been written > an empty file. This is an critical problem > We had encountered this problem. There are many hive and spark jobs running > on our cluster, sometimes it occurs -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
[ https://issues.apache.org/jira/browse/HDFS-16661?focusedWorklogId=791137&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791137 ] ASF GitHub Bot logged work on HDFS-16661: - Author: ASF GitHub Bot Created on: 14/Jul/22 20:29 Start Date: 14/Jul/22 20:29 Worklog Time Spent: 10m Work Description: goiri commented on code in PR #4565: URL: https://github.com/apache/hadoop/pull/4565#discussion_r921545467 ## hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java: ## @@ -206,197 +194,114 @@ List getLoggersForTests() { /// public QuorumCall getJournalState() { -Map> calls = -Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.getJournalState()); -} +Map> calls = Maps.newHashMap(); +loggers.forEach(l -> calls.put(l, l.getJournalState())); return QuorumCall.create(calls); } public QuorumCall isFormatted() { Map> calls = Maps.newHashMap(); -for (AsyncLogger logger : loggers) { - calls.put(logger, logger.isFormatted()); -} +loggers.forEach(l -> calls.put(l, l.isFormatted())); return QuorumCall.create(calls); } - public QuorumCall newEpoch( - NamespaceInfo nsInfo, Review Comment: Why don't we use this anymore? Issue Time Tracking --- Worklog Id: (was: 791137) Time Spent: 20m (was: 10m) > Improve Code With Lambda in AsyncLoggerSet class > > > Key: HDFS-16661 > URL: https://issues.apache.org/jira/browse/HDFS-16661 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: ZanderXu >Assignee: ZanderXu >Priority: Major > Labels: pull-request-available > Time Spent: 20m > Remaining Estimate: 0h > > Improve Code With Lambda in AsyncLoggerSet class -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16660) Improve Code With Lambda in IPCLoggerChannel class
[ https://issues.apache.org/jira/browse/HDFS-16660?focusedWorklogId=791096&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791096 ] ASF GitHub Bot logged work on HDFS-16660: - Author: ASF GitHub Bot Created on: 14/Jul/22 18:42 Start Date: 14/Jul/22 18:42 Worklog Time Spent: 10m Work Description: hadoop-yetus commented on PR #4561: URL: https://github.com/apache/hadoop/pull/4561#issuecomment-1184779786 :broken_heart: **-1 overall** | Vote | Subsystem | Runtime | Logfile | Comment | |::|--:|:|::|:---:| | +0 :ok: | reexec | 0m 38s | | Docker mode activated. | _ Prechecks _ | | +1 :green_heart: | dupname | 0m 0s | | No case conflicting files found. | | +0 :ok: | codespell | 0m 0s | | codespell was not available. | | +0 :ok: | detsecrets | 0m 0s | | detect-secrets was not available. | | +1 :green_heart: | @author | 0m 0s | | The patch does not contain any @author tags. | | -1 :x: | test4tests | 0m 0s | | The patch doesn't appear to include any new or modified tests. Please justify why no new tests are needed for this patch. Also please list what manual steps were performed to verify this patch. | _ trunk Compile Tests _ | | +1 :green_heart: | mvninstall | 37m 11s | | trunk passed | | +1 :green_heart: | compile | 1m 44s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | compile | 1m 37s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | checkstyle | 1m 24s | | trunk passed | | +1 :green_heart: | mvnsite | 1m 44s | | trunk passed | | +1 :green_heart: | javadoc | 1m 26s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 1m 49s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 3m 43s | | trunk passed | | +1 :green_heart: | shadedclient | 22m 58s | | branch has no errors when building and testing our client artifacts. 
| _ Patch Compile Tests _ | | +1 :green_heart: | mvninstall | 1m 24s | | the patch passed | | +1 :green_heart: | compile | 1m 28s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javac | 1m 28s | | the patch passed | | +1 :green_heart: | compile | 1m 19s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | javac | 1m 19s | | the patch passed | | +1 :green_heart: | blanks | 0m 0s | | The patch has no blanks issues. | | +1 :green_heart: | checkstyle | 1m 1s | | the patch passed | | +1 :green_heart: | mvnsite | 1m 28s | | the patch passed | | +1 :green_heart: | javadoc | 1m 0s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 1m 29s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 3m 23s | | the patch passed | | +1 :green_heart: | shadedclient | 22m 55s | | patch has no errors when building and testing our client artifacts. | _ Other Tests _ | | +1 :green_heart: | unit | 240m 47s | | hadoop-hdfs in the patch passed. | | +1 :green_heart: | asflicense | 1m 14s | | The patch does not generate ASF License warnings. 
| | | | 349m 50s | | | | Subsystem | Report/Notes | |--:|:-| | Docker | ClientAPI=1.41 ServerAPI=1.41 base: https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4561/2/artifact/out/Dockerfile | | GITHUB PR | https://github.com/apache/hadoop/pull/4561 | | Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shadedclient spotbugs checkstyle codespell detsecrets | | uname | Linux 7510c2cdcf41 4.15.0-175-generic #184-Ubuntu SMP Thu Mar 24 17:48:36 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux | | Build tool | maven | | Personality | dev-support/bin/hadoop.sh | | git revision | trunk / b127d81e1e28476267bdd32d25c4a409f1b255de | | Default Java | Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | Multi-JDK versions | /usr/lib/jvm/java-11-openjdk-amd64:Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 /usr/lib/jvm/java-8-openjdk-amd64:Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | Test Results | https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4561/2/testReport/ | | Max. process+thread count | 3726 (vs. ulimit of 5500) | | modules | C: hadoop-hdfs-project/hadoop-hdfs U: hadoop-hdfs-project
[jira] [Work logged] (HDFS-16605) Improve Code With Lambda in hadoop-hdfs-rbf module
[ https://issues.apache.org/jira/browse/HDFS-16605?focusedWorklogId=791032&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791032 ] ASF GitHub Bot logged work on HDFS-16605: - Author: ASF GitHub Bot Created on: 14/Jul/22 16:22 Start Date: 14/Jul/22 16:22 Worklog Time Spent: 10m Work Description: hadoop-yetus commented on PR #4375: URL: https://github.com/apache/hadoop/pull/4375#issuecomment-1184641712 :confetti_ball: **+1 overall** | Vote | Subsystem | Runtime | Logfile | Comment | |::|--:|:|::|:---:| | +0 :ok: | reexec | 0m 49s | | Docker mode activated. | _ Prechecks _ | | +1 :green_heart: | dupname | 0m 1s | | No case conflicting files found. | | +0 :ok: | codespell | 0m 0s | | codespell was not available. | | +0 :ok: | detsecrets | 0m 0s | | detect-secrets was not available. | | +1 :green_heart: | @author | 0m 0s | | The patch does not contain any @author tags. | | +1 :green_heart: | test4tests | 0m 0s | | The patch appears to include 10 new or modified test files. | _ trunk Compile Tests _ | | +1 :green_heart: | mvninstall | 40m 0s | | trunk passed | | +1 :green_heart: | compile | 0m 54s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | compile | 0m 48s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | checkstyle | 0m 41s | | trunk passed | | +1 :green_heart: | mvnsite | 0m 53s | | trunk passed | | +1 :green_heart: | javadoc | 0m 58s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 1m 7s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 1m 43s | | trunk passed | | +1 :green_heart: | shadedclient | 24m 8s | | branch has no errors when building and testing our client artifacts. | | -0 :warning: | patch | 24m 31s | | Used diff version of patch file. Binary files and potentially other changes not applied. 
Please rebase and squash commits if necessary. | _ Patch Compile Tests _ | | +1 :green_heart: | mvninstall | 0m 39s | | the patch passed | | +1 :green_heart: | compile | 0m 42s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javac | 0m 42s | | the patch passed | | +1 :green_heart: | compile | 0m 36s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | javac | 0m 36s | | the patch passed | | +1 :green_heart: | blanks | 0m 0s | | The patch has no blanks issues. | | +1 :green_heart: | checkstyle | 0m 22s | | the patch passed | | +1 :green_heart: | mvnsite | 0m 40s | | the patch passed | | +1 :green_heart: | javadoc | 0m 37s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 0m 54s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 1m 26s | | the patch passed | | +1 :green_heart: | shadedclient | 23m 17s | | patch has no errors when building and testing our client artifacts. | _ Other Tests _ | | +1 :green_heart: | unit | 39m 41s | | hadoop-hdfs-rbf in the patch passed. | | +1 :green_heart: | asflicense | 0m 44s | | The patch does not generate ASF License warnings. 
| | | | 143m 22s | | | | Subsystem | Report/Notes | |--:|:-| | Docker | ClientAPI=1.41 ServerAPI=1.41 base: https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4375/10/artifact/out/Dockerfile | | GITHUB PR | https://github.com/apache/hadoop/pull/4375 | | Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shadedclient spotbugs checkstyle codespell detsecrets | | uname | Linux b8a53f02 4.15.0-175-generic #184-Ubuntu SMP Thu Mar 24 17:48:36 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux | | Build tool | maven | | Personality | dev-support/bin/hadoop.sh | | git revision | trunk / 658dbdd9e3389dcc6e93449770081e9f15e17df7 | | Default Java | Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | Multi-JDK versions | /usr/lib/jvm/java-11-openjdk-amd64:Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 /usr/lib/jvm/java-8-openjdk-amd64:Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | Test Results | https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4375/10/testReport/ | | Max. process+thread count | 2057 (vs. ulimit of 5500) | | m
[jira] [Updated] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
[ https://issues.apache.org/jira/browse/HDFS-16661?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] ASF GitHub Bot updated HDFS-16661: -- Labels: pull-request-available (was: ) > Improve Code With Lambda in AsyncLoggerSet class > > > Key: HDFS-16661 > URL: https://issues.apache.org/jira/browse/HDFS-16661 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: ZanderXu >Assignee: ZanderXu >Priority: Major > Labels: pull-request-available > Time Spent: 10m > Remaining Estimate: 0h > > Improve Code With Lambda in AsyncLoggerSet class -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
[ https://issues.apache.org/jira/browse/HDFS-16661?focusedWorklogId=791014&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-791014 ] ASF GitHub Bot logged work on HDFS-16661: - Author: ASF GitHub Bot Created on: 14/Jul/22 15:25 Start Date: 14/Jul/22 15:25 Worklog Time Spent: 10m Work Description: ZanderXu opened a new pull request, #4565: URL: https://github.com/apache/hadoop/pull/4565 ### Description of PR Improve Code With Lambda in AsyncLoggerSet class Issue Time Tracking --- Worklog Id: (was: 791014) Remaining Estimate: 0h Time Spent: 10m > Improve Code With Lambda in AsyncLoggerSet class > > > Key: HDFS-16661 > URL: https://issues.apache.org/jira/browse/HDFS-16661 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: ZanderXu >Assignee: ZanderXu >Priority: Major > Time Spent: 10m > Remaining Estimate: 0h > > Improve Code With Lambda in AsyncLoggerSet class -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Created] (HDFS-16661) Improve Code With Lambda in AsyncLoggerSet class
ZanderXu created HDFS-16661: --- Summary: Improve Code With Lambda in AsyncLoggerSet class Key: HDFS-16661 URL: https://issues.apache.org/jira/browse/HDFS-16661 Project: Hadoop HDFS Issue Type: Improvement Reporter: ZanderXu Assignee: ZanderXu Improve Code With Lambda in AsyncLoggerSet class -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Comment Edited] (HDFS-16652) Upgrade jquery datatable version references to v1.10.19
[ https://issues.apache.org/jira/browse/HDFS-16652?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17566827#comment-17566827 ] Brahma Reddy Battula edited comment on HDFS-16652 at 7/14/22 1:00 PM: -- [~dmmkr] thanks for contributing . Committed to trunk (PR #4562). Can you update PR for branch-3.2 and branch-3.3 also..? was (Author: brahmareddy): [~dmmkr] thanks for contributing . Committed to trunk. Can you update PR for branch-3.2 and branch-3.3 also..? > Upgrade jquery datatable version references to v1.10.19 > --- > > Key: HDFS-16652 > URL: https://issues.apache.org/jira/browse/HDFS-16652 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: D M Murali Krishna Reddy >Assignee: D M Murali Krishna Reddy >Priority: Major > Labels: pull-request-available > Fix For: 3.4.0 > > Attachments: HDFS-16652.001.patch > > Time Spent: 50m > Remaining Estimate: 0h > > Upgrade jquery datatable version references in hdfs webapp to v1.10.19 -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Updated] (HDFS-16652) Upgrade jquery datatable version references to v1.10.19
[ https://issues.apache.org/jira/browse/HDFS-16652?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brahma Reddy Battula updated HDFS-16652: Fix Version/s: 3.4.0 > Upgrade jquery datatable version references to v1.10.19 > --- > > Key: HDFS-16652 > URL: https://issues.apache.org/jira/browse/HDFS-16652 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: D M Murali Krishna Reddy >Assignee: D M Murali Krishna Reddy >Priority: Major > Labels: pull-request-available > Fix For: 3.4.0 > > Attachments: HDFS-16652.001.patch > > Time Spent: 50m > Remaining Estimate: 0h > > Upgrade jquery datatable version references in hdfs webapp to v1.10.19 -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Commented] (HDFS-16652) Upgrade jquery datatable version references to v1.10.19
[ https://issues.apache.org/jira/browse/HDFS-16652?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17566827#comment-17566827 ] Brahma Reddy Battula commented on HDFS-16652: - [~dmmkr] thanks for contributing . Committed to trunk. Can you update PR for branch-3.2 and branch-3.3 also..? > Upgrade jquery datatable version references to v1.10.19 > --- > > Key: HDFS-16652 > URL: https://issues.apache.org/jira/browse/HDFS-16652 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: D M Murali Krishna Reddy >Assignee: D M Murali Krishna Reddy >Priority: Major > Labels: pull-request-available > Attachments: HDFS-16652.001.patch > > Time Spent: 50m > Remaining Estimate: 0h > > Upgrade jquery datatable version references in hdfs webapp to v1.10.19 -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16652) Upgrade jquery datatable version references to v1.10.19
[ https://issues.apache.org/jira/browse/HDFS-16652?focusedWorklogId=790919&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-790919 ] ASF GitHub Bot logged work on HDFS-16652: - Author: ASF GitHub Bot Created on: 14/Jul/22 12:57 Start Date: 14/Jul/22 12:57 Worklog Time Spent: 10m Work Description: brahmareddybattula merged PR #4562: URL: https://github.com/apache/hadoop/pull/4562 Issue Time Tracking --- Worklog Id: (was: 790919) Time Spent: 50m (was: 40m) > Upgrade jquery datatable version references to v1.10.19 > --- > > Key: HDFS-16652 > URL: https://issues.apache.org/jira/browse/HDFS-16652 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: D M Murali Krishna Reddy >Assignee: D M Murali Krishna Reddy >Priority: Major > Labels: pull-request-available > Attachments: HDFS-16652.001.patch > > Time Spent: 50m > Remaining Estimate: 0h > > Upgrade jquery datatable version references in hdfs webapp to v1.10.19 -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-16652) Upgrade jquery datatable version references to v1.10.19
[ https://issues.apache.org/jira/browse/HDFS-16652?focusedWorklogId=790917&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-790917 ] ASF GitHub Bot logged work on HDFS-16652: - Author: ASF GitHub Bot Created on: 14/Jul/22 12:56 Start Date: 14/Jul/22 12:56 Worklog Time Spent: 10m Work Description: brahmareddybattula commented on PR #4562: URL: https://github.com/apache/hadoop/pull/4562#issuecomment-1184415875 +1 Issue Time Tracking --- Worklog Id: (was: 790917) Time Spent: 40m (was: 0.5h) > Upgrade jquery datatable version references to v1.10.19 > --- > > Key: HDFS-16652 > URL: https://issues.apache.org/jira/browse/HDFS-16652 > Project: Hadoop HDFS > Issue Type: Improvement >Reporter: D M Murali Krishna Reddy >Assignee: D M Murali Krishna Reddy >Priority: Major > Labels: pull-request-available > Attachments: HDFS-16652.001.patch > > Time Spent: 40m > Remaining Estimate: 0h > > Upgrade jquery datatable version references in hdfs webapp to v1.10.19 -- This message was sent by Atlassian Jira (v8.20.10#820010) - To unsubscribe, e-mail: hdfs-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: hdfs-issues-h...@hadoop.apache.org
[jira] [Work logged] (HDFS-13522) RBF: Support observer node from Router-Based Federation
[ https://issues.apache.org/jira/browse/HDFS-13522?focusedWorklogId=790806&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-790806 ] ASF GitHub Bot logged work on HDFS-13522: - Author: ASF GitHub Bot Created on: 14/Jul/22 08:35 Start Date: 14/Jul/22 08:35 Worklog Time Spent: 10m Work Description: hadoop-yetus commented on PR #4127: URL: https://github.com/apache/hadoop/pull/4127#issuecomment-1184160981 :confetti_ball: **+1 overall** | Vote | Subsystem | Runtime | Logfile | Comment | |::|--:|:|::|:---:| | +0 :ok: | reexec | 0m 51s | | Docker mode activated. | _ Prechecks _ | | +1 :green_heart: | dupname | 0m 1s | | No case conflicting files found. | | +0 :ok: | codespell | 0m 1s | | codespell was not available. | | +0 :ok: | detsecrets | 0m 1s | | detect-secrets was not available. | | +0 :ok: | buf | 0m 1s | | buf was not available. | | +0 :ok: | buf | 0m 1s | | buf was not available. | | +0 :ok: | xmllint | 0m 1s | | xmllint was not available. | | +1 :green_heart: | @author | 0m 0s | | The patch does not contain any @author tags. | | +1 :green_heart: | test4tests | 0m 0s | | The patch appears to include 12 new or modified test files. 
| _ trunk Compile Tests _ | | +0 :ok: | mvndep | 14m 28s | | Maven dependency ordering for branch | | +1 :green_heart: | mvninstall | 25m 6s | | trunk passed | | +1 :green_heart: | compile | 23m 2s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | compile | 20m 34s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | checkstyle | 4m 19s | | trunk passed | | +1 :green_heart: | mvnsite | 7m 48s | | trunk passed | | +1 :green_heart: | javadoc | 6m 28s | | trunk passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 6m 55s | | trunk passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 12m 27s | | trunk passed | | +1 :green_heart: | shadedclient | 22m 40s | | branch has no errors when building and testing our client artifacts. | _ Patch Compile Tests _ | | +0 :ok: | mvndep | 0m 33s | | Maven dependency ordering for patch | | +1 :green_heart: | mvninstall | 4m 12s | | the patch passed | | +1 :green_heart: | compile | 22m 9s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | cc | 22m 9s | | the patch passed | | +1 :green_heart: | javac | 22m 9s | | the patch passed | | +1 :green_heart: | compile | 20m 27s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | cc | 20m 27s | | the patch passed | | +1 :green_heart: | javac | 20m 27s | | the patch passed | | +1 :green_heart: | blanks | 0m 0s | | The patch has no blanks issues. 
| | -0 :warning: | checkstyle | 4m 49s | [/results-checkstyle-root.txt](https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4127/19/artifact/out/results-checkstyle-root.txt) | root: The patch generated 4 new + 339 unchanged - 1 fixed = 343 total (was 340) | | +1 :green_heart: | mvnsite | 7m 35s | | the patch passed | | +1 :green_heart: | javadoc | 6m 21s | | the patch passed with JDK Private Build-11.0.15+10-Ubuntu-0ubuntu0.20.04.1 | | +1 :green_heart: | javadoc | 6m 32s | | the patch passed with JDK Private Build-1.8.0_312-8u312-b07-0ubuntu1~20.04-b07 | | +1 :green_heart: | spotbugs | 12m 51s | | the patch passed | | +1 :green_heart: | shadedclient | 22m 32s | | patch has no errors when building and testing our client artifacts. | _ Other Tests _ | | +1 :green_heart: | unit | 18m 52s | | hadoop-common in the patch passed. | | +1 :green_heart: | unit | 3m 16s | | hadoop-hdfs-client in the patch passed. | | +1 :green_heart: | unit | 390m 4s | | hadoop-hdfs in the patch passed. | | +1 :green_heart: | unit | 37m 57s | | hadoop-hdfs-rbf in the patch passed. | | +1 :green_heart: | asflicense | 1m 50s | | The patch does not generate ASF License warnings. | | | | 709m 38s | | | | Subsystem | Report/Notes | |--:|:-| | Docker | ClientAPI=1.41 ServerAPI=1.41 base: https://ci-hadoop.apache.org/job/hadoop-multibranch/job/PR-4127/19/artifact/out/Dockerfile | | GITHUB PR | https://github.com/apache/hadoop/pull/4127 | | Optional Tests | dupname asflicense compile javac javadoc mvninstall mvnsite unit shade