Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 a319df412 -> 68ee13e4f


HADOOP-12658. Clear javadoc and check style issues around DomainSocket. 
Contributed by Kai Zheng

(cherry picked from commit 778146eaae5b1e17928a1f26fb1e46536a6ee510)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68ee13e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68ee13e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68ee13e4

Branch: refs/heads/branch-2.8
Commit: 68ee13e4f52b22a320b9514c118db68fb6e162bb
Parents: a319df4
Author: Uma Mahesh <umamah...@apache.org>
Authored: Mon Jan 4 14:32:09 2016 -0800
Committer: Uma Mahesh <umamah...@apache.org>
Committed: Mon Jan 4 15:14:14 2016 -0800

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../main/java/org/apache/hadoop/io/IOUtils.java |  5 +-
 .../apache/hadoop/net/unix/DomainSocket.java    | 59 +++++---------------
 .../apache/hadoop/hdfs/BlockReaderFactory.java  |  2 +-
 .../hdfs/shortcircuit/DfsClientShmManager.java  |  2 +-
 .../hdfs/server/datanode/DataXceiver.java       |  9 +--
 .../server/datanode/ShortCircuitRegistry.java   | 14 ++++-
 7 files changed, 37 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c5ba7f2..cd69dcc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -367,6 +367,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12686. Update FileSystemShell documentation to mention the meaning
     of each columns of fs -du. (Daisuke Kobayashi via aajisaka)
 
+    HADOOP-12658. Clear javadoc and check style issues around DomainSocket
+    (Kai Zheng via umamahesh)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index d4d5862..70e407b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -53,7 +53,8 @@ public class IOUtils {
    * @param close whether or not close the InputStream and 
    * OutputStream at the end. The streams are closed in the finally clause.  
    */
-  public static void copyBytes(InputStream in, OutputStream out, int buffSize, boolean close)
+  public static void copyBytes(InputStream in, OutputStream out,
+                               int buffSize, boolean close)
     throws IOException {
     try {
       copyBytes(in, out, buffSize);
@@ -192,7 +193,7 @@ public class IOUtils {
    * @throws IOException if it could not read requested number of bytes 
    * for any reason (including EOF)
    */
-  public static void readFully(InputStream in, byte buf[],
+  public static void readFully(InputStream in, byte[] buf,
       int off, int len) throws IOException {
     int toRead = len;
     while (toRead > 0) {

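As context for the readFully() contract shown above: the method either fills the requested range or throws, so callers never need to loop on short reads. A minimal usage sketch (the fixed header size and class name are illustrative assumptions, not part of the patch):

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.io.IOUtils;

public class ReadFullyExample {
  // Reads exactly 16 bytes or throws IOException (including on early EOF),
  // per the readFully javadoc above.
  static byte[] readHeader(InputStream in) throws IOException {
    byte[] header = new byte[16];  // assumed fixed-size header
    IOUtils.readFully(in, header, 0, header.length);
    return header;
  }
}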
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
index f1035e2..8379fd1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocket.java
@@ -63,7 +63,8 @@ public class DomainSocket implements Closeable {
   static Log LOG = LogFactory.getLog(DomainSocket.class);
 
   /**
-   * True only if we should validate the paths used in {@link DomainSocket#bind()}
+   * True only if we should validate the paths used in
+   * {@link DomainSocket#bindAndListen(String)}
    */
   private static boolean validateBindPaths = true;
 
@@ -220,11 +221,11 @@ public class DomainSocket implements Closeable {
    *
    * This method can only be used on sockets that were bound with bind().
    *
-   * @return                              The new connection.
-   * @throws IOException                  If there was an I/O error
-   *                                      performing the accept-- such as the
-   *                                      socket being closed from under us.
-   * @throws SocketTimeoutException       If the accept timed out.
+   * @return                The new connection.
+   * @throws IOException    If there was an I/O error performing the accept--
+   *                        such as the socket being closed from under us.
+   *                        Particularly when the accept is timed out, it throws
+   *                        SocketTimeoutException.
    */
   public DomainSocket accept() throws IOException {
     refCount.reference();
@@ -238,13 +239,15 @@ public class DomainSocket implements Closeable {
     }
   }
 
-  private static native int connect0(String path);
+  private static native int connect0(String path) throws IOException;
 
   /**
    * Create a new DomainSocket connected to the given path.
    *
-   * @param path         The path to connect to.
-   * @return             The new DomainSocket.
+   * @param path              The path to connect to.
+   * @throws IOException      If there was an I/O error performing the connect.
+   *
+   * @return                  The new DomainSocket.
    */
   public static DomainSocket connect(String path) throws IOException {
     if (loadingFailureReason != null) {
@@ -425,47 +428,11 @@ public class DomainSocket implements Closeable {
 
   private static native int receiveFileDescriptors0(int fd,
       FileDescriptor[] descriptors,
-      byte jbuf[], int offset, int length) throws IOException;
-
-  /**
-   * Receive some FileDescriptor objects from the process on the other side of
-   * this socket.
-   *
-   * @param descriptors       (output parameter) Array of FileDescriptors.
-   *                          We will fill as many slots as possible with file
-   *                          descriptors passed from the remote process.  The
-   *                          other slots will contain NULL.
-   * @param jbuf              (output parameter) Buffer to read into.
-   *                          The UNIX domain sockets API requires you to read
-   *                          at least one byte from the remote process, even
-   *                          if all you care about is the file descriptors
-   *                          you will receive.
-   * @param offset            Offset into the byte buffer to load data
-   * @param length            Length of the byte buffer to use for data
-   *
-   * @return                  The number of bytes read.  This will be -1 if we
-   *                          reached EOF (similar to SocketInputStream);
-   *                          otherwise, it will be positive.
-   * @throws                  IOException if there was an I/O error.
-   */
-  public int receiveFileDescriptors(FileDescriptor[] descriptors,
-      byte jbuf[], int offset, int length) throws IOException {
-    refCount.reference();
-    boolean exc = true;
-    try {
-      int nBytes = receiveFileDescriptors0(fd, descriptors, jbuf, offset, length);
-      exc = false;
-      return nBytes;
-    } finally {
-      unreference(exc);
-    }
-  }
+      byte[] buf, int offset, int length) throws IOException;
 
   /**
    * Receive some FileDescriptor objects from the process on the other side of
    * this socket, and wrap them in FileInputStream objects.
-   *
-   * See {@link DomainSocket#recvFileInputStreams(ByteBuffer)}
    */
   public int recvFileInputStreams(FileInputStream[] streams, byte buf[],
         int offset, int length) throws IOException {

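To make the connect()/recvFileInputStreams() contracts documented above concrete, here is a hedged sketch of a client receiving file descriptors over a DomainSocket; the socket path, array sizes, and class name are assumptions for illustration, and error handling is kept minimal:

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.net.unix.DomainSocket;

public class DomainSocketRecvExample {
  public static void main(String[] args) throws IOException {
    DomainSocket sock = DomainSocket.connect("/var/run/example.sock");
    try {
      // The UNIX domain sockets API requires reading at least one byte
      // alongside any passed file descriptors (see the removed javadoc above).
      byte[] buf = new byte[1];
      FileInputStream[] streams = new FileInputStream[2];
      int nBytes = sock.recvFileInputStreams(streams, buf, 0, buf.length);
      if (nBytes < 0) {
        throw new IOException("got EOF before receiving file descriptors");
      }
      // ... use the received streams; unfilled slots remain null ...
    } finally {
      sock.close();
    }
  }
}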
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index c7e2a7d..5c7bbd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -591,7 +591,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     switch (resp.getStatus()) {
     case SUCCESS:
       byte buf[] = new byte[1];
-      FileInputStream fis[] = new FileInputStream[2];
+      FileInputStream[] fis = new FileInputStream[2];
       sock.recvFileInputStreams(fis, buf, 0, buf.length);
       ShortCircuitReplica replica = null;
       try {

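The fis change above, like the buf and shmPaths changes further down, swaps a C-style array declaration for the Java-style form that checkstyle prefers; the two are equivalent at compile time (class and field names here are illustrative):

import java.io.FileInputStream;

class ArrayStyleSketch {
  // C-style declaration, flagged by checkstyle:
  FileInputStream cStyle[] = new FileInputStream[2];
  // Equivalent Java-style declaration, which this commit standardizes on:
  FileInputStream[] javaStyle = new FileInputStream[2];
}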
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
index c421fe8..6f8a8fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
@@ -168,7 +168,7 @@ public class DfsClientShmManager implements Closeable {
       case SUCCESS:
         DomainSocket sock = peer.getDomainSocket();
         byte buf[] = new byte[1];
-        FileInputStream fis[] = new FileInputStream[1];
+        FileInputStream[] fis = new FileInputStream[1];
         if (sock.recvFileInputStreams(fis, buf, 0, buf.length) < 0) {
           throw new EOFException("got EOF while trying to transfer the " +
               "file descriptor for the shared memory segment.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index f9c68cd..a54214f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -425,12 +425,12 @@ class DataXceiver extends Receiver implements Runnable {
       throws IOException {
     DataNodeFaultInjector.get().sendShortCircuitShmResponse();
     ShortCircuitShmResponseProto.newBuilder().setStatus(SUCCESS).
-        setId(PBHelperClient.convert(shmInfo.shmId)).build().
+        setId(PBHelperClient.convert(shmInfo.getShmId())).build().
         writeDelimitedTo(socketOut);
     // Send the file descriptor for the shared memory segment.
     byte buf[] = new byte[] { (byte)0 };
     FileDescriptor shmFdArray[] =
-        new FileDescriptor[] { shmInfo.stream.getFD() };
+        new FileDescriptor[] {shmInfo.getFileStream().getFD()};
     sock.sendFileDescriptors(shmFdArray, buf, 0, buf.length);
   }
 
@@ -471,7 +471,8 @@ class DataXceiver extends Receiver implements Runnable {
               "cliID: %s, src: 127.0.0.1, dest: 127.0.0.1, " +
               "op: REQUEST_SHORT_CIRCUIT_SHM," +
               " shmId: %016x%016x, srvID: %s, success: true",
-              clientName, shmInfo.shmId.getHi(), shmInfo.shmId.getLo(),
+              clientName, shmInfo.getShmId().getHi(),
+              shmInfo.getShmId().getLo(),
               datanode.getDatanodeUuid()));
         } else {
           BlockSender.ClientTraceLog.info(String.format(
@@ -490,7 +491,7 @@ class DataXceiver extends Receiver implements Runnable {
         // bad behavior inside the poll() call.  See HADOOP-11802 for details.
         try {
           LOG.warn("Failed to send success response back to the client.  " +
-              "Shutting down socket for " + shmInfo.shmId + ".");
+              "Shutting down socket for " + shmInfo.getShmId() + ".");
           sock.shutdown();
         } catch (IOException e) {
           LOG.warn("Failed to shut down socket in error handler", e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68ee13e4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index b32c0d1..52856af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -165,7 +165,7 @@ public class ShortCircuitRegistry {
             DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS +
             " was set to " + interruptCheck);
       }
-      String shmPaths[] =
+      String[] shmPaths =
           conf.getTrimmedStrings(DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS);
       if (shmPaths.length == 0) {
         shmPaths =
@@ -263,14 +263,22 @@ public class ShortCircuitRegistry {
   }
 
   public static class NewShmInfo implements Closeable {
-    public final ShmId shmId;
-    public final FileInputStream stream;
+    private final ShmId shmId;
+    private final FileInputStream stream;
 
     NewShmInfo(ShmId shmId, FileInputStream stream) {
       this.shmId = shmId;
       this.stream = stream;
     }
 
+    public ShmId getShmId() {
+      return shmId;
+    }
+
+    public FileInputStream getFileStream() {
+      return stream;
+    }
+
     @Override
     public void close() throws IOException {
       stream.close();

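The NewShmInfo change above hides the formerly public fields behind accessors; the DataXceiver hunks earlier in this commit show the updated call sites. A hedged caller sketch (the class is hypothetical; package locations for NewShmInfo and ShmId follow my reading of the 2.8 source tree):

import java.io.FileDescriptor;
import java.io.IOException;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;

class NewShmInfoCallerSketch {
  void use(NewShmInfo shmInfo) throws IOException {
    // Before this commit, shmInfo.shmId and shmInfo.stream were public fields.
    ShmId id = shmInfo.getShmId();                        // was: shmInfo.shmId
    FileDescriptor fd = shmInfo.getFileStream().getFD();  // was: shmInfo.stream.getFD()
  }
}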