This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch ozone-0.4
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/ozone-0.4 by this push:
     new f8f85fc  HDDS-1318. Fix MalformedTracerStateStringException on DN logs. Contributed by Xiaoyu Yao.
f8f85fc is described below

commit f8f85fcc315886fafa9904d8b05453c3f145edbd
Author: Xiaoyu Yao <x...@apache.org>
AuthorDate: Thu Mar 28 12:00:58 2019 -0700

    HDDS-1318. Fix MalformedTracerStateStringException on DN logs. Contributed by Xiaoyu Yao.
    
    This closes #641
    
    (cherry picked from commit ca5e4ce0367228bc0ac032c4654d3deb7493316b)
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  | 23 ++++++++--
 .../apache/hadoop/hdds/tracing/StringCodec.java    |  9 +++-
 .../hadoop/hdds/tracing/TestStringCodec.java       | 52 ++++++++++++++++++++++
 .../apache/hadoop/hdds/tracing/package-info.java   | 21 +++++++++
 .../ozone/container/ContainerTestHelper.java       |  1 -
 .../ozone/container/TestContainerReplication.java  |  2 -
 .../container/metrics/TestContainerMetrics.java    |  1 -
 .../container/ozoneimpl/TestOzoneContainer.java    | 46 -------------------
 .../ozoneimpl/TestOzoneContainerWithTLS.java       |  1 -
 .../ozoneimpl/TestSecureOzoneContainer.java        |  1 -
 .../container/server/TestContainerServer.java      |  2 -
 .../hadoop/ozone/ozShell/TestOzoneShell.java       | 13 ++++++
 12 files changed, 114 insertions(+), 58 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index c068046..76e3b46 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.tracing.GrpcClientInterceptor;
+import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -136,7 +138,8 @@ public class XceiverClientGrpc extends XceiverClientSpi {
     NettyChannelBuilder channelBuilder = NettyChannelBuilder.forAddress(dn
             .getIpAddress(), port).usePlaintext()
             .maxInboundMessageSize(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE)
-            .intercept(new ClientCredentialInterceptor(userName, encodedToken));
+            .intercept(new ClientCredentialInterceptor(userName, encodedToken),
+                new GrpcClientInterceptor());
     if (secConfig.isGrpcTlsEnabled()) {
       File trustCertCollectionFile = secConfig.getTrustStoreFile();
       File privateKeyFile = secConfig.getClientPrivateKeyFile();
@@ -204,7 +207,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
       ContainerCommandRequestProto request) throws IOException {
     try {
       XceiverClientReply reply;
-      reply = sendCommandWithRetry(request, null);
+      reply = sendCommandWithTraceIDAndRetry(request, null);
       ContainerCommandResponseProto responseProto = reply.getResponse().get();
       return responseProto;
     } catch (ExecutionException | InterruptedException e) {
@@ -217,7 +220,21 @@ public class XceiverClientGrpc extends XceiverClientSpi {
       ContainerCommandRequestProto request, List<DatanodeDetails> excludeDns)
       throws IOException {
     Preconditions.checkState(HddsUtils.isReadOnly(request));
-    return sendCommandWithRetry(request, excludeDns);
+    return sendCommandWithTraceIDAndRetry(request, excludeDns);
+  }
+
+  private XceiverClientReply sendCommandWithTraceIDAndRetry(
+      ContainerCommandRequestProto request, List<DatanodeDetails> excludeDns)
+      throws IOException {
+    try (Scope scope = GlobalTracer.get()
+        .buildSpan("XceiverClientGrpc." + request.getCmdType().name())
+        .startActive(true)) {
+      ContainerCommandRequestProto finalPayload =
+          ContainerCommandRequestProto.newBuilder(request)
+              .setTraceID(TracingUtil.exportCurrentSpan())
+              .build();
+      return sendCommandWithRetry(finalPayload, excludeDns);
+    }
   }
 
   private XceiverClientReply sendCommandWithRetry(
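Note (not part of the patch): the hunk above wraps each outgoing command in an OpenTracing span and carries the serialized span context in the request's traceID field. TracingUtil.exportCurrentSpan() itself is not shown in this diff; the sketch below only approximates that pattern, assuming it injects the active span through the hdds StringCodec.FORMAT carrier. The class name TraceIdExportSketch and the span name used in main() are illustrative, not from the commit.

import io.opentracing.Scope;
import io.opentracing.Span;
import io.opentracing.Tracer;
import io.opentracing.util.GlobalTracer;
import org.apache.hadoop.hdds.tracing.StringCodec;

public final class TraceIdExportSketch {

  private TraceIdExportSketch() {
  }

  // Serialize the currently active span context to a string, or return ""
  // when no span is active (e.g. with the no-op GlobalTracer).
  static String exportCurrentSpan(Tracer tracer) {
    StringBuilder carrier = new StringBuilder();
    Span active = tracer.activeSpan();
    if (active != null) {
      // Assumption: StringCodec.FORMAT is the StringBuilder-based
      // propagation format registered for the Jaeger tracer in hdds.
      tracer.inject(active.context(), StringCodec.FORMAT, carrier);
    }
    return carrier.toString();
  }

  public static void main(String[] args) {
    Tracer tracer = GlobalTracer.get();
    // Same shape as sendCommandWithTraceIDAndRetry above: open a span
    // around the client call and attach its serialized context to the
    // request before it is sent to the datanode.
    try (Scope ignored = tracer.buildSpan("XceiverClientGrpc.ReadChunk")
        .startActive(true)) {
      String traceId = exportCurrentSpan(tracer);
      System.out.println("traceID carried in the request: " + traceId);
    }
  }
}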
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
index ea88a7f..03365cf 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/StringCodec.java
@@ -25,20 +25,27 @@ import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException;
 import io.jaegertracing.internal.exceptions.TraceIdOutOfBoundException;
 import io.jaegertracing.spi.Codec;
 import io.opentracing.propagation.Format;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * A jaeger codec to save the current tracing context t a string.
+ * A jaeger codec to save the current tracing context as a string.
  */
 public class StringCodec implements Codec<StringBuilder> {
 
+  public static final Logger LOG  = LoggerFactory.getLogger(StringCodec.class);
   public static final StringFormat FORMAT = new StringFormat();
 
   @Override
   public JaegerSpanContext extract(StringBuilder s) {
+    if (s == null) {
+      throw new EmptyTracerStateStringException();
+    }
     String value = s.toString();
     if (value != null && !value.equals("")) {
       String[] parts = value.split(":");
       if (parts.length != 4) {
+        LOG.trace("MalformedTracerStateString: {}", value);
         throw new MalformedTracerStateStringException(value);
       } else {
         String traceId = parts[0];
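Note (not part of the patch): StringCodec splits the carried value on ':' and accepts only a four-field string; the new TestStringCodec below treats "123:456:789:66" as well formed. The field names used in the sketch (traceId, spanId, parentSpanId, flags) follow Jaeger's text propagation layout and are an assumption here; the class name TraceStateFormatSketch is illustrative only.

public final class TraceStateFormatSketch {

  public static void main(String[] args) {
    // A value shaped like the one the new unit test accepts.
    String value = "123:456:789:66";
    String[] parts = value.split(":");
    if (parts.length != 4) {
      // This is the branch StringCodec now logs at TRACE level before
      // throwing MalformedTracerStateStringException.
      throw new IllegalArgumentException("Malformed tracer state: " + value);
    }
    System.out.println("traceId=" + parts[0] + " spanId=" + parts[1]
        + " parentSpanId=" + parts[2] + " flags=" + parts[3]);
  }
}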
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
new file mode 100644
index 0000000..10724ab
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.tracing;
+
+import io.jaegertracing.internal.JaegerSpanContext;
+import io.jaegertracing.internal.exceptions.EmptyTracerStateStringException;
+import io.jaegertracing.internal.exceptions.MalformedTracerStateStringException;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class TestStringCodec {
+
+  @Test
+  void testExtract() throws Exception {
+    StringCodec codec = new StringCodec();
+
+    LambdaTestUtils.intercept(EmptyTracerStateStringException.class,
+        () -> codec.extract(null));
+
+    StringBuilder sb = new StringBuilder().append("123");
+    LambdaTestUtils.intercept(MalformedTracerStateStringException.class,
+        "String does not match tracer state format",
+        () -> codec.extract(sb));
+
+    sb.append(":456:789");
+    LambdaTestUtils.intercept(MalformedTracerStateStringException.class,
+        "String does not match tracer state format",
+        () -> codec.extract(sb));
+    sb.append(":66");
+    JaegerSpanContext context = codec.extract(sb);
+    String expectedContextString = new String("123:456:789:66");
+    assertTrue(context.getTraceId().equals("123"));
+    assertTrue(context.toString().equals(expectedContextString));
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
new file mode 100644
index 0000000..18e1200
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/package-info.java
@@ -0,0 +1,21 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.tracing;
+/**
+ Test cases for ozone tracing.
+ */
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index f08a07f..7f2d93d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -545,7 +545,6 @@ public final class ContainerTestHelper {
    */
   public static void verifyGetBlock(ContainerCommandRequestProto request,
       ContainerCommandResponseProto response, int expectedChunksCount) {
-    Assert.assertEquals(request.getTraceID(), response.getTraceID());
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
     Assert.assertEquals(expectedChunksCount,
         response.getGetBlock().getBlockData().getChunksCount());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
index 5153b41..84d6fd5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestContainerReplication.java
@@ -112,8 +112,6 @@ public class TestContainerReplication {
 
     Assert.assertNotNull(response);
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(
-        putBlockRequest.getTraceID().equals(response.getTraceID()));
 
     HddsDatanodeService destinationDatanode =
         chooseDatanodeWithoutContainer(sourcePipelines,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index e06a9e9..43c354c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -123,7 +123,6 @@ public class TestContainerMetrics {
       ContainerCommandRequestProto request = ContainerTestHelper
           .getCreateContainerRequest(containerID, pipeline);
       ContainerCommandResponseProto response = client.sendCommand(request);
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 27777de..eeb6c53 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -158,7 +158,6 @@ public class TestOzoneContainer {
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
       // Put Block
       putBlockRequest = ContainerTestHelper.getPutBlockRequest(
@@ -168,8 +167,6 @@ public class TestOzoneContainer {
       response = client.sendCommand(putBlockRequest);
       Assert.assertNotNull(response);
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertTrue(putBlockRequest.getTraceID()
-          .equals(response.getTraceID()));
 
       // Get Block
       request = ContainerTestHelper.
@@ -187,7 +184,6 @@ public class TestOzoneContainer {
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
       //Delete Chunk
       request = ContainerTestHelper.getDeleteChunkRequest(
@@ -196,7 +192,6 @@ public class TestOzoneContainer {
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
       //Update an existing container
       Map<String, String> containerUpdate = new HashMap<String, String>();
@@ -259,8 +254,6 @@ public class TestOzoneContainer {
       ContainerProtos.ContainerCommandResponseProto response
           = client.sendCommand(smallFileRequest);
       Assert.assertNotNull(response);
-      Assert.assertTrue(smallFileRequest.getTraceID()
-          .equals(response.getTraceID()));
 
       final ContainerProtos.ContainerCommandRequestProto getSmallFileRequest
           = ContainerTestHelper.getReadSmallFileRequest(client.getPipeline(),
@@ -310,8 +303,6 @@ public class TestOzoneContainer {
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
-      Assert.assertTrue(
-          putBlockRequest.getTraceID().equals(response.getTraceID()));
 
       // Close the contianer.
       request = ContainerTestHelper.getCloseContainer(
@@ -319,7 +310,6 @@ public class TestOzoneContainer {
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
 
       // Assert that none of the write  operations are working after close.
@@ -330,8 +320,6 @@ public class TestOzoneContainer {
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
           response.getResult());
-      Assert.assertTrue(
-          writeChunkRequest.getTraceID().equals(response.getTraceID()));
 
       // Read chunk must work on a closed container.
       request = ContainerTestHelper.getReadChunkRequest(client.getPipeline(),
@@ -339,16 +327,12 @@ public class TestOzoneContainer {
       response = client.sendCommand(request);
       Assert.assertNotNull(response);
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
-
 
       // Put block will fail on a closed container.
       response = client.sendCommand(putBlockRequest);
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
           response.getResult());
-      Assert.assertTrue(putBlockRequest.getTraceID()
-          .equals(response.getTraceID()));
 
       // Get block must work on the closed container.
       request = ContainerTestHelper.getBlockRequest(client.getPipeline(),
@@ -366,7 +350,6 @@ public class TestOzoneContainer {
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.CLOSED_CONTAINER_IO,
           response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
     } finally {
       if (client != null) {
         client.close();
@@ -407,8 +390,6 @@ public class TestOzoneContainer {
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
-      Assert.assertTrue(
-          putBlockRequest.getTraceID().equals(response.getTraceID()));
 
       // Container cannot be deleted because force flag is set to false and
       // the container is still open
@@ -419,7 +400,6 @@ public class TestOzoneContainer {
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
           response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
       // Container can be deleted, by setting force flag, even with out closing
       request = ContainerTestHelper.getDeleteContainer(
@@ -429,7 +409,6 @@ public class TestOzoneContainer {
       Assert.assertNotNull(response);
       Assert.assertEquals(ContainerProtos.Result.SUCCESS,
           response.getResult());
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 
     } finally {
       if (client != null) {
@@ -524,7 +503,6 @@ public class TestOzoneContainer {
     ContainerProtos.ContainerCommandResponseProto response =
         client.sendCommand(request);
     Assert.assertNotNull(response);
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
   }
 
   public static ContainerProtos.ContainerCommandRequestProto
@@ -539,30 +517,6 @@ public class TestOzoneContainer {
         client.sendCommand(writeChunkRequest);
     Assert.assertNotNull(response);
     Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
-    Assert.assertTrue(response.getTraceID().equals(response.getTraceID()));
     return writeChunkRequest;
   }
-
-  static void runRequestWithoutTraceId(
-          long containerID, XceiverClientSpi client) throws Exception {
-    try {
-      client.connect();
-      createContainerForTesting(client, containerID);
-      BlockID blockID = ContainerTestHelper.getTestBlockID(containerID);
-      final ContainerProtos.ContainerCommandRequestProto smallFileRequest
-              = ContainerTestHelper.getWriteSmallFileRequest(
-              client.getPipeline(), blockID, 1024);
-
-      ContainerProtos.ContainerCommandResponseProto response
-              = client.sendCommand(smallFileRequest);
-      Assert.assertNotNull(response);
-      Assert.assertTrue(smallFileRequest.getTraceID()
-              .equals(response.getTraceID()));
-    } finally {
-      if (client != null) {
-        client.close();
-      }
-    }
-  }
-
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
index fcfc762..a6e8dd0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerWithTLS.java
@@ -178,7 +178,6 @@ public class TestOzoneContainerWithTLS {
     ContainerProtos.ContainerCommandResponseProto response =
         client.sendCommand(request);
     Assert.assertNotNull(response);
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
   }
 
   private StateContext getContext(DatanodeDetails datanodeDetails) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
index 6cb1ebe..07836ea 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestSecureOzoneContainer.java
@@ -212,7 +212,6 @@ public class TestSecureOzoneContainer {
     ContainerProtos.ContainerCommandResponseProto response =
         client.sendCommand(request);
     Assert.assertNotNull(response);
-    Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
   }
 
   private StateContext getContext(DatanodeDetails datanodeDetails) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index fac7b50..4794345 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -188,7 +188,6 @@ public class TestContainerServer {
       Assert.assertNotNull(request.getTraceID());
 
       ContainerCommandResponseProto response = client.sendCommand(request);
-      Assert.assertEquals(request.getTraceID(), response.getTraceID());
     } finally {
       if (client != null) {
         client.close();
@@ -245,7 +244,6 @@ public class TestContainerServer {
           ContainerTestHelper.getCreateContainerRequest(
               ContainerTestHelper.getTestContainerID(), pipeline);
       ContainerCommandResponseProto response = client.sendCommand(request);
-      Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
      Assert.assertEquals(ContainerProtos.Result.SUCCESS, response.getResult());
     } finally {
       if (client != null) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
index 7f77f87..9cc9659 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.cli.MissingSubcommandException;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.tracing.StringCodec;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneAcl;
@@ -82,9 +83,12 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.slf4j.event.Level.TRACE;
+
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Rule;
@@ -908,6 +912,9 @@ public class TestOzoneShell {
 
   @Test
   public void testGetKey() throws Exception {
+    GenericTestUtils.LogCapturer logs = GenericTestUtils.LogCapturer
+        .captureLogs(StringCodec.LOG);
+    GenericTestUtils.setLogLevel(StringCodec.LOG, TRACE);
     LOG.info("Running testGetKey");
     String keyName = "key" + RandomStringUtils.randomNumeric(5);
     OzoneBucket bucket = creatBucket();
@@ -919,6 +926,9 @@ public class TestOzoneShell {
         bucket.createKey(keyName, dataStr.length());
     keyOutputStream.write(dataStr.getBytes());
     keyOutputStream.close();
+    assertFalse("put key without malformed tracing",
+        logs.getOutput().contains("MalformedTracerStateString"));
+    logs.clearOutput();
 
     String tmpPath = baseDir.getAbsolutePath() + "/testfile-"
         + UUID.randomUUID().toString();
@@ -926,6 +936,9 @@ public class TestOzoneShell {
         url + "/" + volumeName + "/" + bucketName + "/" + keyName,
         tmpPath};
     execute(shell, args);
+    assertFalse("get key without malformed tracing",
+        logs.getOutput().contains("MalformedTracerStateString"));
+    logs.clearOutput();
 
     byte[] dataBytes = new byte[dataStr.length()];
     try (FileInputStream randFile = new FileInputStream(new File(tmpPath))) {

