Update 3.0.0-alpha3 changes, release notes, jdiff.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cd612ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cd612ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cd612ba

Branch: refs/heads/YARN-2915
Commit: 2cd612ba8e3b84ddf41acf7b1beb0a4757a2465b
Parents: 16ad896
Author: Andrew Wang <w...@apache.org>
Authored: Fri May 26 14:14:38 2017 -0700
Committer: Andrew Wang <w...@apache.org>
Committed: Fri May 26 14:14:38 2017 -0700

----------------------------------------------------------------------
 .../3.0.0-alpha3/CHANGES.3.0.0-alpha3.md        |  71 ++++
 .../3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md   |  22 ++
 .../jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml   | 326 +++++++++++++++++++
 3 files changed, 419 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
new file mode 100644
index 0000000..61d63e0
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/CHANGES.3.0.0-alpha3.md
@@ -0,0 +1,71 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" Changelog
+
+## Release 3.0.0-alpha3 - 2017-05-25
+
+### INCOMPATIBLE CHANGES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPORTANT ISSUES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### NEW FEATURES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### IMPROVEMENTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### BUG FIXES:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+| [YARN-6336](https://issues.apache.org/jira/browse/YARN-6336) | Jenkins report YARN new UI build failure | Blocker | . | Junping Du | Sunil G |
+| [YARN-6278](https://issues.apache.org/jira/browse/YARN-6278) | Enforce to use correct node and npm version in new YARN-UI build | Critical | . | Sunil G | Sunil G |
+
+
+### TESTS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### SUB-TASKS:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+
+
+### OTHER:
+
+| JIRA | Summary | Priority | Component | Reporter | Contributor |
+|:---- |:---- | :--- |:---- |:---- |:---- |
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
new file mode 100644
index 0000000..bda1807
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/release/3.0.0-alpha3/RELEASENOTES.3.0.0-alpha3.md
@@ -0,0 +1,22 @@
+
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+# "Apache Hadoop" 3.0.0-alpha3 Release Notes
+
+These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
+ http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cd612ba/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml new file mode 100644 index 0000000..cadf733 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_3.0.0-alpha3.xml @@ -0,0 +1,326 @@ +<?xml version="1.0" encoding="iso-8859-1" standalone="no"?> +<!-- Generated by the JDiff Javadoc doclet --> +<!-- (http://www.jdiff.org) --> +<!-- on Thu May 25 05:19:31 UTC 2017 --> + +<api + xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance' + xsi:noNamespaceSchemaLocation='api.xsd' + name="Apache Hadoop HDFS 3.0.0-alpha3" + jdversion="1.0.9"> + +<!-- Command line arguments = -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -verbose -classpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/classes:/build/source/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-3.0.0-alpha3.jar:/usr/lib/jvm/java-8-oracle/lib/tools.jar:/build/source/hadoop-common-project/hadoop-auth/target/hadoop-auth-3.0.0-alpha3.jar:/maven/org/slf4j/slf4j-api/1.7.10/slf4j-api-1.7.10.jar:/maven/org/apache/httpcomponents/httpclient/4.5.2/httpclient-4.5.2.jar:/maven/org/apache/httpcomponents/httpcore/4.4.4/httpcore-4.4.4.jar:/maven/com/nimbusds/nimbus-jose-jwt/3.9/nimbus-jose-jwt-3.9.jar:/maven/net/jcip/jcip-annotations/1.0/jcip-annotations-1.0.jar:/maven/net/minidev/json-smart/1.1.1/json-smart-1.1.1.jar:/maven/org/apache/zookeeper/zookeeper/3.4.6/zookeep er-3.4.6.jar:/maven/jline/jline/0.9.94/jline-0.9.94.jar:/maven/org/apache/curator/curator-framework/2.7.1/curator-framework-2.7.1.jar:/maven/org/apache/kerby/kerb-simplekdc/1.0.0-RC2/kerb-simplekdc-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-config/1.0.0-RC2/kerby-config-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-core/1.0.0-RC2/kerb-core-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-asn1/1.0.0-RC2/kerby-asn1-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-pkix/1.0.0-RC2/kerby-pkix-1.0.0-RC2.jar:/maven/org/apache/kerby/kerby-util/1.0.0-RC2/kerby-util-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-client/1.0.0-RC2/kerb-client-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-common/1.0.0-RC2/kerb-common-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-util/1.0.0-RC2/kerb-util-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-crypto/1.0.0-RC2/kerb-crypto-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-server/1.0.0-RC2/kerb-server-1.0.0-RC2.jar:/maven/org/apache/kerby/kerb-identity/1.0.0-RC2/kerb-identity-1.0.0-RC2.jar:/ 
maven/org/apache/kerby/kerb-admin/1.0.0-RC2/kerb-admin-1.0.0-RC2.jar:/build/source/hadoop-common-project/hadoop-common/target/hadoop-common-3.0.0-alpha3.jar:/maven/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/maven/commons-net/commons-net/3.1/commons-net-3.1.jar:/maven/commons-collections/commons-collections/3.2.2/commons-collections-3.2.2.jar:/maven/org/eclipse/jetty/jetty-servlet/9.3.11.v20160721/jetty-servlet-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-security/9.3.11.v20160721/jetty-security-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-webapp/9.3.11.v20160721/jetty-webapp-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-xml/9.3.11.v20160721/jetty-xml-9.3.11.v20160721.jar:/maven/javax/servlet/jsp/jsp-api/2.1/jsp-api-2.1.jar:/maven/com/sun/jersey/jersey-servlet/1.19/jersey-servlet-1.19.jar:/maven/com/sun/jersey/jersey-json/1.19/jersey-json-1.19.jar:/maven/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/maven/com/sun/xml/bind/jaxb-impl/2.2. 3-1/jaxb-impl-2.2.3-1.jar:/maven/javax/xml/bind/jaxb-api/2.2.11/jaxb-api-2.2.11.jar:/maven/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/maven/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/maven/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/maven/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/maven/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/maven/commons-beanutils/commons-beanutils/1.9.3/commons-beanutils-1.9.3.jar:/maven/org/apache/commons/commons-configuration2/2.1/commons-configuration2-2.1.jar:/maven/org/apache/commons/commons-lang3/3.3.2/commons-lang3-3.3.2.jar:/maven/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/maven/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/maven/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/maven/com/google/re2j/re2j/1.0/re2j-1.0.jar:/maven/com/google/ code/gson/gson/2.2.4/gson-2.2.4.jar:/maven/com/jcraft/jsch/0.1.51/jsch-0.1.51.jar:/maven/org/apache/curator/curator-client/2.7.1/curator-client-2.7.1.jar:/maven/org/apache/curator/curator-recipes/2.7.1/curator-recipes-2.7.1.jar:/maven/com/google/code/findbugs/jsr305/3.0.0/jsr305-3.0.0.jar:/maven/org/apache/commons/commons-compress/1.4.1/commons-compress-1.4.1.jar:/maven/org/tukaani/xz/1.0/xz-1.0.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-3.0.0-alpha3.jar:/maven/com/squareup/okhttp/okhttp/2.4.0/okhttp-2.4.0.jar:/maven/com/squareup/okio/okio/1.4.0/okio-1.4.0.jar:/maven/com/fasterxml/jackson/core/jackson-annotations/2.7.8/jackson-annotations-2.7.8.jar:/maven/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/maven/org/eclipse/jetty/jetty-server/9.3.11.v20160721/jetty-server-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-http/9.3.11.v20160721/jetty-http-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-io/9.3.11.v20160721/jetty-io-9.3.11.v2016 
0721.jar:/maven/org/eclipse/jetty/jetty-util/9.3.11.v20160721/jetty-util-9.3.11.v20160721.jar:/maven/org/eclipse/jetty/jetty-util-ajax/9.3.11.v20160721/jetty-util-ajax-9.3.11.v20160721.jar:/maven/com/sun/jersey/jersey-core/1.19/jersey-core-1.19.jar:/maven/javax/ws/rs/jsr311-api/1.1.1/jsr311-api-1.1.1.jar:/maven/com/sun/jersey/jersey-server/1.19/jersey-server-1.19.jar:/maven/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/maven/commons-codec/commons-codec/1.4/commons-codec-1.4.jar:/maven/commons-io/commons-io/2.4/commons-io-2.4.jar:/maven/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/maven/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/maven/commons-daemon/commons-daemon/1.0.13/commons-daemon-1.0.13.jar:/maven/log4j/log4j/1.2.17/log4j-1.2.17.jar:/maven/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar:/maven/javax/servlet/javax.servlet-api/3.1.0/javax.servlet-api-3.1.0.jar:/maven/org/slf4j/slf4j-log4j12/1.7.10/slf4j-log4j12-1.7.10.jar:/maven/xml enc/xmlenc/0.52/xmlenc-0.52.jar:/maven/io/netty/netty/3.10.5.Final/netty-3.10.5.Final.jar:/maven/io/netty/netty-all/4.1.0.Beta5/netty-all-4.1.0.Beta5.jar:/maven/com/twitter/hpack/0.11.0/hpack-0.11.0.jar:/maven/xerces/xercesImpl/2.9.1/xercesImpl-2.9.1.jar:/maven/xml-apis/xml-apis/1.3.04/xml-apis-1.3.04.jar:/maven/org/apache/htrace/htrace-core4/4.1.0-incubating/htrace-core4-4.1.0-incubating.jar:/maven/org/fusesource/leveldbjni/leveldbjni-all/1.8/leveldbjni-all-1.8.jar:/maven/com/fasterxml/jackson/core/jackson-databind/2.7.8/jackson-databind-2.7.8.jar:/maven/com/fasterxml/jackson/core/jackson-core/2.7.8/jackson-core-2.7.8.jar -sourcepath /build/source/hadoop-hdfs-project/hadoop-hdfs/src/main/java -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsJDiffDoclet -docletpath /build/source/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/build/source/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar -apidir /build/source/hadoop-hdfs-project/hadoop-hdfs/target /site/jdiff/xml -apiname Apache Hadoop HDFS 3.0.0-alpha3 --> +<package name="org.apache.hadoop.hdfs"> + <doc> + <![CDATA[<p>A distributed implementation of {@link +org.apache.hadoop.fs.FileSystem}. This is loosely modelled after +Google's <a href="http://research.google.com/archive/gfs.html">GFS</a>.</p> + +<p>The most important difference is that unlike GFS, Hadoop DFS files +have strictly one writer at any one time. Bytes are always appended +to the end of the writer's stream. There is no notion of "record appends" +or "mutations" that are then checked or reordered. Writers simply emit +a byte stream. 
That byte stream is guaranteed to be stored in the +order written.</p>]]> + </doc> +</package> +<package name="org.apache.hadoop.hdfs.net"> +</package> +<package name="org.apache.hadoop.hdfs.protocol"> +</package> +<package name="org.apache.hadoop.hdfs.protocol.datatransfer"> +</package> +<package name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl"> +</package> +<package name="org.apache.hadoop.hdfs.protocolPB"> +</package> +<package name="org.apache.hadoop.hdfs.qjournal.client"> +</package> +<package name="org.apache.hadoop.hdfs.qjournal.protocol"> +</package> +<package name="org.apache.hadoop.hdfs.qjournal.protocolPB"> +</package> +<package name="org.apache.hadoop.hdfs.qjournal.server"> + <!-- start interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean --> + <interface name="JournalNodeMXBean" abstract="true" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <method name="getJournalsStatus" return="java.lang.String" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <doc> + <![CDATA[Get status information (e.g., whether formatted) of JournalNode's journals. + + @return A string presenting status for each journal]]> + </doc> + </method> + <doc> + <![CDATA[This is the JMX management interface for JournalNode information]]> + </doc> + </interface> + <!-- end interface org.apache.hadoop.hdfs.qjournal.server.JournalNodeMXBean --> +</package> +<package name="org.apache.hadoop.hdfs.security.token.block"> +</package> +<package name="org.apache.hadoop.hdfs.security.token.delegation"> +</package> +<package name="org.apache.hadoop.hdfs.server.balancer"> +</package> +<package name="org.apache.hadoop.hdfs.server.blockmanagement"> +</package> +<package name="org.apache.hadoop.hdfs.server.common"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode.fsdataset.impl"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode.metrics"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode.web"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode.web.dtp"> +</package> +<package name="org.apache.hadoop.hdfs.server.datanode.web.webhdfs"> +</package> +<package name="org.apache.hadoop.hdfs.server.diskbalancer"> +</package> +<package name="org.apache.hadoop.hdfs.server.diskbalancer.command"> +</package> +<package name="org.apache.hadoop.hdfs.server.diskbalancer.connectors"> +</package> +<package name="org.apache.hadoop.hdfs.server.diskbalancer.datamodel"> +</package> +<package name="org.apache.hadoop.hdfs.server.diskbalancer.planner"> +</package> +<package name="org.apache.hadoop.hdfs.server.mover"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode"> + <!-- start interface org.apache.hadoop.hdfs.server.namenode.AuditLogger --> + <interface name="AuditLogger" abstract="true" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <method name="initialize" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="conf" type="org.apache.hadoop.conf.Configuration"/> + <doc> + <![CDATA[Called during initialization of the logger. 
+ + @param conf The configuration object.]]> + </doc> + </method> + <method name="logAuditEvent" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="succeeded" type="boolean"/> + <param name="userName" type="java.lang.String"/> + <param name="addr" type="java.net.InetAddress"/> + <param name="cmd" type="java.lang.String"/> + <param name="src" type="java.lang.String"/> + <param name="dst" type="java.lang.String"/> + <param name="stat" type="org.apache.hadoop.fs.FileStatus"/> + <doc> + <![CDATA[Called to log an audit event. + <p> + This method must return as quickly as possible, since it's called + in a critical section of the NameNode's operation. + + @param succeeded Whether authorization succeeded. + @param userName Name of the user executing the request. + @param addr Remote address of the request. + @param cmd The requested command. + @param src Path of affected source file. + @param dst Path of affected destination file (if any). + @param stat File information for operations that change the file's + metadata (permissions, owner, times, etc).]]> + </doc> + </method> + <doc> + <![CDATA[Interface defining an audit logger.]]> + </doc> + </interface> + <!-- end interface org.apache.hadoop.hdfs.server.namenode.AuditLogger --> + <!-- start class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger --> + <class name="HdfsAuditLogger" extends="java.lang.Object" + abstract="true" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <implements name="org.apache.hadoop.hdfs.server.namenode.AuditLogger"/> + <constructor name="HdfsAuditLogger" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + </constructor> + <method name="logAuditEvent" + abstract="false" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="succeeded" type="boolean"/> + <param name="userName" type="java.lang.String"/> + <param name="addr" type="java.net.InetAddress"/> + <param name="cmd" type="java.lang.String"/> + <param name="src" type="java.lang.String"/> + <param name="dst" type="java.lang.String"/> + <param name="status" type="org.apache.hadoop.fs.FileStatus"/> + </method> + <method name="logAuditEvent" + abstract="false" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="succeeded" type="boolean"/> + <param name="userName" type="java.lang.String"/> + <param name="addr" type="java.net.InetAddress"/> + <param name="cmd" type="java.lang.String"/> + <param name="src" type="java.lang.String"/> + <param name="dst" type="java.lang.String"/> + <param name="stat" type="org.apache.hadoop.fs.FileStatus"/> + <param name="callerContext" type="org.apache.hadoop.ipc.CallerContext"/> + <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/> + <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/> + <doc> + <![CDATA[Same as + {@link #logAuditEvent(boolean, String, InetAddress, String, String, String, + FileStatus)} with additional parameters related to logging delegation token + tracking IDs. + + @param succeeded Whether authorization succeeded. + @param userName Name of the user executing the request. + @param addr Remote address of the request. + @param cmd The requested command. + @param src Path of affected source file. 
+ @param dst Path of affected destination file (if any). + @param stat File information for operations that change the file's metadata + (permissions, owner, times, etc). + @param callerContext Context information of the caller + @param ugi UserGroupInformation of the current user, or null if not logging + token tracking information + @param dtSecretManager The token secret manager, or null if not logging + token tracking information]]> + </doc> + </method> + <method name="logAuditEvent" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="succeeded" type="boolean"/> + <param name="userName" type="java.lang.String"/> + <param name="addr" type="java.net.InetAddress"/> + <param name="cmd" type="java.lang.String"/> + <param name="src" type="java.lang.String"/> + <param name="dst" type="java.lang.String"/> + <param name="stat" type="org.apache.hadoop.fs.FileStatus"/> + <param name="ugi" type="org.apache.hadoop.security.UserGroupInformation"/> + <param name="dtSecretManager" type="org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager"/> + <doc> + <![CDATA[Same as + {@link #logAuditEvent(boolean, String, InetAddress, String, String, + String, FileStatus, CallerContext, UserGroupInformation, + DelegationTokenSecretManager)} without {@link CallerContext} information.]]> + </doc> + </method> + <doc> + <![CDATA[Extension of {@link AuditLogger}.]]> + </doc> + </class> + <!-- end class org.apache.hadoop.hdfs.server.namenode.HdfsAuditLogger --> + <!-- start class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider --> + <class name="INodeAttributeProvider" extends="java.lang.Object" + abstract="true" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <constructor name="INodeAttributeProvider" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + </constructor> + <method name="start" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <doc> + <![CDATA[Initialize the provider. This method is called at NameNode startup + time.]]> + </doc> + </method> + <method name="stop" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <doc> + <![CDATA[Shutdown the provider. 
This method is called at NameNode shutdown time.]]> + </doc> + </method> + <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes" + abstract="false" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="fullPath" type="java.lang.String"/> + <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/> + </method> + <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes" + abstract="true" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="pathElements" type="java.lang.String[]"/> + <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/> + </method> + <method name="getAttributes" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributes" + abstract="false" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="components" type="byte[][]"/> + <param name="inode" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributes"/> + </method> + <method name="getExternalAccessControlEnforcer" return="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer" + abstract="false" native="false" synchronized="false" + static="false" final="false" visibility="public" + deprecated="not deprecated"> + <param name="defaultEnforcer" type="org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer"/> + <doc> + <![CDATA[Can be over-ridden by implementations to provide a custom Access Control + Enforcer that can provide an alternate implementation of the + default permission checking logic. + @param defaultEnforcer The Default AccessControlEnforcer + @return The AccessControlEnforcer to use]]> + </doc> + </method> + </class> + <!-- end class org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider --> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.ha"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.metrics"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.snapshot"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.top"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.top.metrics"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.top.window"> +</package> +<package name="org.apache.hadoop.hdfs.server.namenode.web.resources"> +</package> +<package name="org.apache.hadoop.hdfs.server.protocol"> +</package> +<package name="org.apache.hadoop.hdfs.tools"> +</package> +<package name="org.apache.hadoop.hdfs.tools.erasurecode"> +</package> +<package name="org.apache.hadoop.hdfs.tools.offlineEditsViewer"> +</package> +<package name="org.apache.hadoop.hdfs.tools.offlineImageViewer"> +</package> +<package name="org.apache.hadoop.hdfs.tools.snapshot"> +</package> +<package name="org.apache.hadoop.hdfs.util"> +</package> +<package name="org.apache.hadoop.hdfs.web"> +</package> +<package name="org.apache.hadoop.hdfs.web.resources"> +</package> + +</api> --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
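For readers skimming the jdiff output above: the org.apache.hadoop.hdfs package javadoc describes the HDFS write model, in which a file has strictly one writer at a time and bytes are only ever appended to the end of the stream. The following is an illustrative sketch of that model against the public FileSystem API; it is not part of this patch, and the path and configuration are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SingleWriterAppendExample {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at an HDFS cluster; the path below is made up.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/append-demo.txt");

    // Only one writer holds the lease on a file at a time, and bytes are always
    // added at the end of the stream, as the package javadoc describes.
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.writeBytes("first batch of bytes\n");
    }
    try (FSDataOutputStream appender = fs.append(path)) {
      appender.writeBytes("appended afterwards, never in the middle\n");
    }
  }
}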
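The jdiff XML also records the org.apache.hadoop.hdfs.server.namenode.AuditLogger interface, the NameNode's pluggable audit-logging extension point: initialize(Configuration) is called once during startup, and logAuditEvent(...) is invoked for every audited request and must return quickly. A minimal sketch of an implementation, assuming an slf4j logger and a hypothetical class name, might look like this:

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class StdoutAuditLogger implements AuditLogger {
  private static final Logger LOG = LoggerFactory.getLogger(StdoutAuditLogger.class);

  @Override
  public void initialize(Configuration conf) {
    // Called once while the NameNode starts up; read any custom settings here.
  }

  @Override
  public void logAuditEvent(boolean succeeded, String userName, InetAddress addr,
      String cmd, String src, String dst, FileStatus stat) {
    // The javadoc above stresses that this runs in a critical section of the
    // NameNode, so keep the work done here as cheap as possible.
    LOG.info("allowed={} ugi={} ip={} cmd={} src={} dst={}",
        succeeded, userName, addr, cmd, src, dst);
  }
}

In released Hadoop versions a custom logger like this is normally wired in through the dfs.namenode.audit.loggers property, though that configuration detail is outside the scope of this commit.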
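Similarly, INodeAttributeProvider is the documented hook for substituting inode attributes during permission checking: start() and stop() bracket the NameNode lifecycle, and getAttributes(String[], INodeAttributes) is the abstract overload a provider must implement. A no-op sketch under those assumptions (class name hypothetical, not part of this patch):

import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

public class PassthroughAttributeProvider extends INodeAttributeProvider {
  @Override
  public void start() {
    // Called at NameNode startup; open any external connections here.
  }

  @Override
  public void stop() {
    // Called at NameNode shutdown; release resources acquired in start().
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) {
    // Return the stored attributes unchanged; a real provider could substitute
    // ownership or permissions fetched from an external authorization system.
    return inode;
  }
}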