http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
new file mode 100644
index 0000000..7b22e3e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/JsonSerDeser.java
@@ -0,0 +1,249 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.codehaus.jackson.JsonGenerationException;
+import org.codehaus.jackson.JsonParseException;
+import org.codehaus.jackson.map.DeserializationConfig;
+import org.codehaus.jackson.map.JsonMappingException;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+/**
+ * Support for marshalling objects to and from JSON.
+ * This class is NOT thread safe; it constructs an object mapper
+ * as an instance field.
+ * @param <T> the type to marshal
+ */
+public class JsonSerDeser<T> {
+
+  private static final Logger log = LoggerFactory.getLogger(JsonSerDeser.class);
+  private static final String UTF_8 = "UTF-8";
+
+  private final Class<T> classType;
+  private final ObjectMapper mapper;
+
+  /**
+   * Create an instance bound to a specific type
+   * @param classType class type
+   */
+  public JsonSerDeser(Class<T> classType) {
+    this.classType = classType;
+    this.mapper = new ObjectMapper();
+    mapper.configure(DeserializationConfig.Feature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+  }
+
+  public JsonSerDeser(Class<T> classType, PropertyNamingStrategy namingStrategy) {
+    this(classType);
+    mapper.setPropertyNamingStrategy(namingStrategy);
+  }
+
+  /**
+   * Convert from JSON
+   * @param json input
+   * @return the parsed JSON
+   * @throws IOException IO
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  public T fromJson(String json)
+    throws IOException, JsonParseException, JsonMappingException {
+    try {
+      return mapper.readValue(json, classType);
+    } catch (IOException e) {
+      log.error("Exception while parsing json : " + e + "\n" + json, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert from a JSON file
+   * @param jsonFile input file
+   * @return the parsed JSON
+   * @throws IOException IO problems
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  public T fromFile(File jsonFile)
+    throws IOException, JsonParseException, JsonMappingException {
+    File absoluteFile = jsonFile.getAbsoluteFile();
+    try {
+      return mapper.readValue(absoluteFile, classType);
+    } catch (IOException e) {
+      log.error("Exception while parsing json file {}", absoluteFile, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert from a JSON resource on the classpath
+   * @param resource resource name
+   * @return the parsed JSON
+   * @throws IOException IO problems
+   * @throws JsonMappingException failure to map from the JSON to this class
+   */
+  public T fromResource(String resource)
+    throws IOException, JsonParseException, JsonMappingException {
+    try(InputStream resStream = this.getClass().getResourceAsStream(resource)) {
+      if (resStream == null) {
+        throw new FileNotFoundException(resource);
+      }
+      return (T) (mapper.readValue(resStream, classType));
+    } catch (IOException e) {
+      log.error("Exception while parsing json resource {}", resource, e);
+      throw e;
+    }
+  }
+
+  /**
+   * Convert from an input stream, closing the stream afterwards.
+   * @param stream input stream
+   * @return the parsed JSON
+   * @throws IOException IO problems
+   */
+  public T fromStream(InputStream stream) throws IOException {
+    try {
+      return (T) (mapper.readValue(stream, classType));
+    } catch (IOException e) {
+      log.error("Exception while parsing json input stream", e);
+      throw e;
+    } finally {
+      IOUtils.closeStream(stream);
+    }
+  }
+
+  /**
+   * Clone by converting to JSON and back again.
+   * This is much less efficient than any Java clone process.
+   * @param instance instance to duplicate
+   * @return a new instance
+   * @throws IOException problems.
+   */
+  public T fromInstance(T instance) throws IOException {
+    return fromJson(toJson(instance));
+  }
+
+  /**
+   * Deserialize from a byte array
+   * @param b byte array of UTF-8 encoded JSON
+   * @return the deserialized value
+   * @throws IOException parse problems
+   */
+  public T fromBytes(byte[] b) throws IOException {
+    String json = new String(b, 0, b.length, UTF_8);
+    return fromJson(json);
+  }
+  
+  /**
+   * Load from a Hadoop filesystem
+   * @param fs filesystem
+   * @param path path
+   * @return the loaded instance
+   * @throws IOException IO problems
+   * @throws JsonParseException parse problems
+   * @throws JsonMappingException O/J mapping problems
+   */
+  public T load(FileSystem fs, Path path)
+    throws IOException, JsonParseException, JsonMappingException {
+    FileStatus status = fs.getFileStatus(path);
+    long len = status.getLen();
+    byte[] b = new byte[(int) len];
+    try (FSDataInputStream dataInputStream = fs.open(path)) {
+      int count = dataInputStream.read(b);
+      if (count != len) {
+        throw new EOFException("Read of " + path + " finished prematurely");
+      }
+    }
+    return fromBytes(b);
+  }
+
+
+  /**
+   * Save to a hadoop filesystem
+   * @param fs filesystem
+   * @param path path
+   * @param instance instance to save
+   * @param overwrite should any existing file be overwritten
+   * @throws IOException IO exception
+   */
+  public void save(FileSystem fs, Path path, T instance,
+                   boolean overwrite) throws
+                                      IOException {
+    FSDataOutputStream dataOutputStream = fs.create(path, overwrite);
+    writeJsonAsBytes(instance, dataOutputStream);
+  }
+
+  /**
+   * Save an instance to a file
+   * @param instance instance to save
+   * @param file file
+   * @throws IOException IO problems
+   */
+  public void save(T instance, File file) throws
+      IOException {
+    writeJsonAsBytes(instance, new FileOutputStream(file.getAbsoluteFile()));
+  }
+
+  /**
+   * Write the JSON as bytes, then close the stream.
+   * @param instance instance to write
+   * @param dataOutputStream an output stream that will always be closed
+   * @throws IOException on any failure
+   */
+  private void writeJsonAsBytes(T instance,
+      OutputStream dataOutputStream) throws IOException {
+    try {
+      String json = toJson(instance);
+      byte[] b = json.getBytes(UTF_8);
+      dataOutputStream.write(b);
+      dataOutputStream.flush();
+      dataOutputStream.close();
+    } finally {
+      IOUtils.closeStream(dataOutputStream);
+    }
+  }
+
+  /**
+   * Convert an object to a JSON string
+   * @param instance instance to convert
+   * @return a JSON string description
+   * @throws JsonGenerationException generation problems
+   * @throws JsonMappingException O/J mapping problems
+   */
+  public String toJson(T instance) throws IOException,
+                                               JsonGenerationException,
+                                               JsonMappingException {
+    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
+    return mapper.writeValueAsString(instance);
+  }
+
+}
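
For reference, a minimal usage sketch of JsonSerDeser (not part of the patch;
the ServiceRecord bean, the "record" and "conf" variables, and the HDFS path
are illustrative assumptions):

    // Round-trip a hypothetical Jackson-mappable bean through JSON.
    JsonSerDeser<ServiceRecord> serDeser =
        new JsonSerDeser<>(ServiceRecord.class);
    String json = serDeser.toJson(record);          // pretty-printed JSON
    ServiceRecord parsed = serDeser.fromJson(json); // back to an object

    // Persist to and load from a Hadoop filesystem.
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/services/example/record.json"); // illustrative path
    serDeser.save(fs, path, record, true);                 // overwrite = true
    ServiceRecord loaded = serDeser.load(fs, path);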

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/KerberosDiags.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/KerberosDiags.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/KerberosDiags.java
new file mode 100644
index 0000000..c0712c3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/KerberosDiags.java
@@ -0,0 +1,680 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SaslPropertiesResolver;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.crypto.Cipher;
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.lang.reflect.InvocationTargetException;
+import java.net.InetAddress;
+import java.security.NoSuchAlgorithmException;
+import java.util.Collection;
+import java.util.Date;
+import java.util.List;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.security.UserGroupInformation.*;
+import static org.apache.hadoop.security.authentication.util.KerberosUtil.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
+
+/**
+ * Kerberos diagnostics
+ * At some point this may move to hadoop core, so please keep use of slider
+ * methods and classes to ~0.
+ *
+ * This operation expands some of the diagnostic output of the security code,
+ * but not all of it. For more complete output:
+ *
+ * Set the environment variable {@code HADOOP_JAAS_DEBUG=true}
+ * Set the log level for {@code org.apache.hadoop.security=DEBUG}
+ */
+public class KerberosDiags implements Closeable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(KerberosDiags.class);
+  public static final String KRB5_CCNAME = "KRB5CCNAME";
+  public static final String JAVA_SECURITY_KRB5_CONF
+    = "java.security.krb5.conf";
+  public static final String JAVA_SECURITY_KRB5_REALM
+    = "java.security.krb5.realm";
+  public static final String SUN_SECURITY_KRB5_DEBUG
+    = "sun.security.krb5.debug";
+  public static final String SUN_SECURITY_SPNEGO_DEBUG
+    = "sun.security.spnego.debug";
+  public static final String SUN_SECURITY_JAAS_FILE
+    = "java.security.auth.login.config";
+  public static final String KERBEROS_KINIT_COMMAND
+    = "hadoop.kerberos.kinit.command";
+  public static final String HADOOP_AUTHENTICATION_IS_DISABLED
+      = "Hadoop authentication is disabled";
+  public static final String UNSET = "(unset)";
+  public static final String NO_DEFAULT_REALM = "Cannot locate default realm";
+
+  private final Configuration conf;
+  private final List<String> services;
+  private final PrintStream out;
+  private final File keytab;
+  private final String principal;
+  private final long minKeyLength;
+  private final boolean securityRequired;
+
+  public static final String CAT_JVM = "JVM";
+  public static final String CAT_JAAS = "JAAS";
+  public static final String CAT_CONFIG = "CONFIG";
+  public static final String CAT_LOGIN = "LOGIN";
+  public static final String CAT_KERBEROS = "KERBEROS";
+  public static final String CAT_SASL = "SASL";
+
+  @SuppressWarnings("IOResourceOpenedButNotSafelyClosed")
+  public KerberosDiags(Configuration conf,
+      PrintStream out,
+      List<String> services,
+      File keytab,
+      String principal,
+      long minKeyLength,
+      boolean securityRequired) {
+    this.conf = conf;
+    this.services = services;
+    this.keytab = keytab;
+    this.principal = principal;
+    this.out = out;
+    this.minKeyLength = minKeyLength;
+    this.securityRequired = securityRequired;
+  }
+
+  @Override
+  public void close() throws IOException {
+    flush();
+  }
+
+  /**
+   * Execute diagnostics.
+   * <p>
+   * Things it would be nice if UGI made accessible
+   * <ol>
+   *   <li>A way to enable JAAS debug programmatically</li>
+   *   <li>Access to the TGT</li>
+   * </ol>
+   * @return true if security was enabled and all probes were successful
+   * @throws KerberosDiagsFailure explicitly raised failure
+   * @throws Exception other security problems
+   */
+  @SuppressWarnings("deprecation")
+  public boolean execute() throws Exception {
+
+    title("Kerberos Diagnostics scan at %s",
+        new Date(System.currentTimeMillis()));
+
+    // check that the machine has a name
+    println("Hostname: %s",
+        InetAddress.getLocalHost().getCanonicalHostName());
+
+    // Fail fast on a JVM without JCE installed.
+    validateKeyLength();
+
+    // look at realm
+    println("JVM Kerberos Login Module = %s", getKrb5LoginModuleName());
+    printDefaultRealm();
+
+    title("System Properties");
+    for (String prop : new String[]{
+      JAVA_SECURITY_KRB5_CONF,
+      JAVA_SECURITY_KRB5_REALM,
+      SUN_SECURITY_KRB5_DEBUG,
+      SUN_SECURITY_SPNEGO_DEBUG,
+      SUN_SECURITY_JAAS_FILE
+    }) {
+      printSysprop(prop);
+    }
+
+    title("Environment Variables");
+    for (String env : new String[]{
+      "HADOOP_JAAS_DEBUG",
+      KRB5_CCNAME,
+      "HADOOP_USER_NAME",
+      "HADOOP_PROXY_USER",
+      HADOOP_TOKEN_FILE_LOCATION,
+    }) {
+      printEnv(env);
+    }
+
+    for (String prop : new String[]{
+      KERBEROS_KINIT_COMMAND,
+      HADOOP_SECURITY_AUTHENTICATION,
+      HADOOP_SECURITY_AUTHORIZATION,
+      "hadoop.kerberos.min.seconds.before.relogin",    // not in 2.6
+      "hadoop.security.dns.interface",   // not in 2.6
+      "hadoop.security.dns.nameserver",  // not in 2.6
+      HADOOP_RPC_PROTECTION,
+      HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS,
+      HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX,
+      HADOOP_SECURITY_GROUP_MAPPING,
+      "hadoop.security.impersonation.provider.class",    // not in 2.6
+      "dfs.data.transfer.protection" // HDFS
+    }) {
+      printConfOpt(prop);
+    }
+
+    // check that authentication is enabled
+    if (SecurityUtil.getAuthenticationMethod(conf)
+        .equals(AuthenticationMethod.SIMPLE)) {
+      println(HADOOP_AUTHENTICATION_IS_DISABLED);
+      failif(securityRequired, CAT_CONFIG, HADOOP_AUTHENTICATION_IS_DISABLED);
+      // no security, skip rest of test
+      return false;
+    }
+
+    validateKrb5File();
+    validateSasl(HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS);
+    validateSasl("dfs.data.transfer.saslproperties.resolver.class");
+    validateKinitExecutable();
+    validateJAAS();
+    // now the big test: login, then try again
+    boolean krb5Debug = getAndSet(SUN_SECURITY_KRB5_DEBUG);
+    boolean spnegoDebug = getAndSet(SUN_SECURITY_SPNEGO_DEBUG);
+    try {
+      title("Logging in");
+
+      if (keytab != null) {
+        dumpKeytab(keytab);
+        loginFromKeytab();
+      } else {
+        UserGroupInformation loginUser = getLoginUser();
+        dumpUGI("Log in user", loginUser);
+        validateUGI("Login user", loginUser);
+        println("Ticket based login: %b", isLoginTicketBased());
+        println("Keytab based login: %b", isLoginKeytabBased());
+      }
+
+      return true;
+    } finally {
+      // restore original system properties
+      System.setProperty(SUN_SECURITY_KRB5_DEBUG,
+        Boolean.toString(krb5Debug));
+      System.setProperty(SUN_SECURITY_SPNEGO_DEBUG,
+        Boolean.toString(spnegoDebug));
+    }
+  }
+
+  /**
+   * Fail fast on a JVM without JCE installed.
+   *
+   * This is a recurrent problem
+   * (that is: it keeps creeping back with JVM updates);
+   * a fast failure is the best tactic
+   * @throws NoSuchAlgorithmException
+   */
+
+  protected void validateKeyLength() throws NoSuchAlgorithmException {
+    int aesLen = Cipher.getMaxAllowedKeyLength("AES");
+    println("Maximum AES encryption key length %d bits", aesLen);
+    failif(aesLen < minKeyLength,
+        CAT_JVM,
+        "Java Cryptography Extensions are not installed on this JVM."
+        +" Maximum supported key length %s - minimum required %d",
+        aesLen, minKeyLength);
+  }
+
+  /**
+   * Get the default realm.
+   * <p>
+   * Not having a default realm may be harmless, so is noted at info.
+   * All other invocation failures are downgraded to warn, as
+   * follow-on actions may still work.
+   * Failure to invoke the method via reflection is rejected,
+   * as it's a sign of JVM compatibility issues that may have other
+   * consequences.
+   */
+  protected void printDefaultRealm() {
+    try {
+      println("Default Realm = %s",
+          getDefaultRealm());
+    } catch (ClassNotFoundException
+        | IllegalAccessException
+        | NoSuchMethodException e) {
+
+      throw new KerberosDiagsFailure(CAT_JVM, e,
+          "Failed to invoke krb5.Config.getDefaultRealm: %s", e);
+    } catch (InvocationTargetException e) {
+      Throwable cause = e.getCause() != null ? e.getCause() : e;
+      if (cause.toString().contains(NO_DEFAULT_REALM)) {
+        // exception raised if there is no default realm. This is not
+        // always a problem, so downgrade to a message.
+        println("Host has no default realm");
+        LOG.debug(cause.toString(), cause);
+      } else {
+        println("Kerberos.getDefaultRealm() failed: %s\n%s",
+            cause,
+            org.apache.hadoop.util.StringUtils.stringifyException(cause));
+      }
+    }
+  }
+
+  /**
+   * Locate the krb5.conf file and dump it.
+   * No-op on Windows.
+   * @throws IOException IO problems
+   */
+  private void validateKrb5File() throws IOException {
+    if (!Shell.WINDOWS) {
+      title("Locating Kerberos configuration file");
+      String krbPath = "/etc/krb5.conf";
+      String jvmKrbPath = System.getProperty(JAVA_SECURITY_KRB5_CONF);
+      if (jvmKrbPath != null) {
+        println("Setting kerberos path from sysprop %s: %s",
+          JAVA_SECURITY_KRB5_CONF, jvmKrbPath);
+        krbPath = jvmKrbPath;
+      }
+
+      String krb5name = System.getenv(KRB5_CCNAME);
+      if (krb5name != null) {
+        println("Setting kerberos path from environment variable %s: %s",
+          KRB5_CCNAME, krb5name);
+        krbPath = krb5name;
+        if (jvmKrbPath != null) {
+          println("Warning - both %s and %s were set - %s takes priority",
+            JAVA_SECURITY_KRB5_CONF, KRB5_CCNAME, KRB5_CCNAME);
+        }
+      }
+
+      File krbFile = new File(krbPath);
+      println("Kerberos configuration file = %s", krbFile);
+      failif(!krbFile.exists(),
+          CAT_KERBEROS,
+          "Kerberos configuration file %s not found", krbFile);
+      dump(krbFile);
+    }
+  }
+
+  /**
+   * Dump a keytab: list all principals.
+   * @param keytabFile the keytab file
+   * @throws IOException IO problems
+   */
+  public void dumpKeytab(File keytabFile) throws IOException {
+    title("Examining keytab %s", keytabFile);
+    File kt = keytabFile.getCanonicalFile();
+    failif(!kt.exists(), CAT_CONFIG, "Keytab not found: %s", kt);
+    failif(!kt.isFile(), CAT_CONFIG, "Keytab is not a valid file: %s", kt);
+
+    String[] names = getPrincipalNames(keytabFile.getCanonicalPath(),
+        Pattern.compile(".*"));
+    println("keytab entry count: %d", names.length);
+    for (String name : names) {
+      println("    %s", name);
+    }
+    println("-----");
+  }
+
+  /**
+   * Log in from a keytab, dump the UGI, validate it, then try and log in again.
+   * That second-time login catches JVM/Hadoop compatibility problems.
+   * @throws IOException IO problems
+   */
+  private void loginFromKeytab() throws IOException {
+    UserGroupInformation ugi;
+    String identity;
+    if (keytab != null) {
+      File kt = keytab.getCanonicalFile();
+      println("Using keytab %s principal %s", kt, principal);
+      identity = principal;
+
+      failif(StringUtils.isEmpty(principal), CAT_KERBEROS,
+          "No principal defined");
+      ugi = loginUserFromKeytabAndReturnUGI(principal, kt.getPath());
+      dumpUGI(identity, ugi);
+      validateUGI(principal, ugi);
+
+      title("Attempting to log in from keytab again");
+      // package scoped -hence the reason why this class must be in the
+      // hadoop.security package
+      setShouldRenewImmediatelyForTests(true);
+      // attempt a new login
+      ugi.reloginFromKeytab();
+    } else {
+      println("No keytab: logging is as current user");
+    }
+  }
+
+  /**
+   * Dump a UGI.
+   * @param title title of this section
+   * @param ugi UGI to dump
+   * @throws IOException IO problems
+   */
+  private void dumpUGI(String title, UserGroupInformation ugi)
+    throws IOException {
+    title(title);
+    println("UGI instance = %s", ugi);
+    println("Has kerberos credentials: %b", ugi.hasKerberosCredentials());
+    println("Authentication method: %s", ugi.getAuthenticationMethod());
+    println("Real Authentication method: %s",
+      ugi.getRealAuthenticationMethod());
+    title("Group names");
+    for (String name : ugi.getGroupNames()) {
+      println(name);
+    }
+    title("Credentials");
+    Credentials credentials = ugi.getCredentials();
+    List<Text> secretKeys = credentials.getAllSecretKeys();
+    title("Secret keys");
+    if (!secretKeys.isEmpty()) {
+      for (Text secret: secretKeys) {
+        println("%s", secret);
+      }
+    } else {
+      println("(none)");
+    }
+
+    dumpTokens(ugi);
+  }
+
+  /**
+   * Validate the UGI: verify it is kerberized.
+   * @param messagePrefix message in exceptions
+   * @param user user to validate
+   */
+  private void validateUGI(String messagePrefix, UserGroupInformation user) {
+    failif(!user.hasKerberosCredentials(),
+        CAT_LOGIN, "%s: No kerberos credentials for %s", messagePrefix, user);
+    failif(user.getAuthenticationMethod() == null,
+        CAT_LOGIN, "%s: Null AuthenticationMethod for %s", messagePrefix, 
user);
+  }
+
+  /**
+   * A cursory look at the {@code kinit} executable.
+   * If it is an absolute path: it must exist with a size > 0.
+   * If it is just a command, it has to be on the path. There's no check
+   * for that -but the PATH is printed out.
+   */
+  private void validateKinitExecutable() {
+    String kinit = conf.getTrimmed(KERBEROS_KINIT_COMMAND, "");
+    if (!kinit.isEmpty()) {
+      File kinitPath = new File(kinit);
+      println("%s = %s", KERBEROS_KINIT_COMMAND, kinitPath);
+      if (kinitPath.isAbsolute()) {
+        failif(!kinitPath.exists(), CAT_KERBEROS,
+            "%s executable does not exist: %s",
+            KERBEROS_KINIT_COMMAND, kinitPath);
+        failif(!kinitPath.isFile(), CAT_KERBEROS,
+            "%s path does not refer to a file: %s",
+            KERBEROS_KINIT_COMMAND, kinitPath);
+        failif(kinitPath.length() == 0, CAT_KERBEROS,
+            "%s file is empty: %s",
+            KERBEROS_KINIT_COMMAND, kinitPath);
+      } else {
+        println("Executable %s is relative -must be on the PATH", kinit);
+        printEnv("PATH");
+      }
+    }
+  }
+
+  /**
+   * Try to load the SASL resolver.
+   * @param saslPropsResolverKey key for the SASL resolver
+   */
+  private void validateSasl(String saslPropsResolverKey) {
+    title("Resolving SASL property %s", saslPropsResolverKey);
+    String saslPropsResolver = conf.getTrimmed(saslPropsResolverKey);
+    try {
+      Class<? extends SaslPropertiesResolver> resolverClass = conf.getClass(
+          saslPropsResolverKey,
+          SaslPropertiesResolver.class, SaslPropertiesResolver.class);
+      println("Resolver is %s", resolverClass);
+    } catch (RuntimeException e) {
+      throw new KerberosDiagsFailure(CAT_SASL, e,
+          "Failed to load %s class %s",
+          saslPropsResolverKey, saslPropsResolver);
+    }
+  }
+
+  /**
+   * Validate any JAAS entry referenced in the {@link #SUN_SECURITY_JAAS_FILE}
+   * property.
+   */
+  private void validateJAAS() {
+    String jaasFilename = System.getProperty(SUN_SECURITY_JAAS_FILE);
+    if (jaasFilename != null) {
+      title("JAAS");
+      File jaasFile = new File(jaasFilename);
+      println("JAAS file is defined in %s: %s",
+          SUN_SECURITY_JAAS_FILE, jaasFile);
+      failif(!jaasFile.exists(), CAT_JAAS,
+          "JAAS file does not exist: %s", jaasFile);
+      failif(!jaasFile.isFile(), CAT_JAAS,
+          "Specified JAAS file is not a file: %s", jaasFile);
+    }
+  }
+
+  /**
+   * Dump all tokens of a user
+   * @param user user
+   */
+  public void dumpTokens(UserGroupInformation user) {
+    Collection<Token<? extends TokenIdentifier>> tokens
+      = user.getCredentials().getAllTokens();
+    title("Token Count: %d", tokens.size());
+    for (Token<? extends TokenIdentifier> token : tokens) {
+      println("Token %s", token.getKind());
+    }
+  }
+
+  /**
+   * Set a system property to true; return the old value so it can be restored
+   * @param sysprop property
+   * @return the previous value
+   */
+  private boolean getAndSet(String sysprop) {
+    boolean old = Boolean.getBoolean(sysprop);
+    System.setProperty(sysprop, "true");
+    return old;
+  }
+
+  /**
+   * Flush all active output channels, including {@code System.err},
+   * so as to stay in sync with any JRE log messages.
+   */
+  private void flush() {
+    if (out != null) {
+      out.flush();
+    } else {
+      System.out.flush();
+    }
+    System.err.flush();
+  }
+
+  /**
+   * Format and print a line of output.
+   * This goes to any output file, or
+   * is logged at info. The output is flushed before and after, to
+   * try and stay in sync with JRE logging.
+   * @param format format string
+   * @param args any arguments
+   */
+  @VisibleForTesting
+  public void println(String format, Object... args) {
+    println(format(format, args));
+  }
+
+  /**
+   * Print a line of output. This goes to any output file, or
+   * is logged at info. The output is flushed before and after, to
+   * try and stay in sync with JRE logging.
+   * @param msg message string
+   */
+  @VisibleForTesting
+  private void println(String msg) {
+    flush();
+    if (out != null) {
+      out.println(msg);
+    } else {
+      LOG.info(msg);
+    }
+    flush();
+  }
+
+  /**
+   * Print a title entry
+   * @param format format string
+   * @param args any arguments
+   */
+  private void title(String format, Object... args) {
+    println("");
+    println("");
+    String msg = "== " + format(format, args) + " ==";
+    println(msg);
+    println("");
+  }
+
+  /**
+   * Print a system property, or {@link #UNSET} if unset.
+   * @param property property to print
+   */
+  private void printSysprop(String property) {
+    println("%s = \"%s\"", property,
+        System.getProperty(property, UNSET));
+  }
+
+  /**
+   * Print a configuration option, or {@link #UNSET} if unset.
+   * @param option option to print
+   */
+  private void printConfOpt(String option) {
+    println("%s = \"%s\"", option, conf.get(option, UNSET));
+  }
+
+  /**
+   * Print an environment variable's name and value; printing
+   * {@link #UNSET} if it is not set
+   * @param variable environment variable
+   */
+  private void printEnv(String variable) {
+    String env = System.getenv(variable);
+    println("%s = \"%s\"", variable, env != null ? env : UNSET);
+  }
+
+  /**
+   * Dump any file to standard out; add a trailing newline
+   * @param file file to dump
+   * @throws IOException IO problems
+   */
+  public void dump(File file) throws IOException {
+    try (FileInputStream in = new FileInputStream(file)) {
+      for (String line : IOUtils.readLines(in)) {
+        println("%s", line);
+      }
+    }
+    println("");
+  }
+
+  /**
+   * Format and raise a failure
+   *
+   * @param category category for exception
+   * @param message string formatting message
+   * @param args any arguments for the formatting
+   * @throws KerberosDiagsFailure containing the formatted text
+   */
+  private void fail(String category, String message, Object... args)
+    throws KerberosDiagsFailure {
+    throw new KerberosDiagsFailure(category, message, args);
+  }
+
+  /**
+   * Conditional failure with string formatted arguments
+   * @param condition failure condition
+   * @param category category for exception
+   * @param message string formatting message
+   * @param args any arguments for the formatting
+   * @throws KerberosDiagsFailure containing the formatted text
+   *         if the condition was met
+   */
+  private void failif(boolean condition,
+      String category,
+      String message,
+      Object... args)
+    throws KerberosDiagsFailure {
+    if (condition) {
+      fail(category, message, args);
+    }
+  }
+
+  /**
+   * Format a string, treating a call where there are no varargs values
+   * as a string to pass through unformatted.
+   * @param message message, which is either a format string + args, or
+   * a general string
+   * @param args argument array
+   * @return a string for printing.
+   */
+  public static String format(String message, Object... args) {
+    if (args.length == 0) {
+      return message;
+    } else {
+      return String.format(message, args);
+    }
+  }
+
+  /**
+   * Diagnostics failures return the exit code 41, "unauthorized".
+   *
+   * They have a category, initially for testing: the category can be
+   * validated without having to match on the entire string.
+   */
+  public static class KerberosDiagsFailure extends ExitUtil.ExitException {
+    private final String category;
+
+    public KerberosDiagsFailure(String category, String message) {
+      super(41, category + ": " + message);
+      this.category = category;
+    }
+
+    public KerberosDiagsFailure(String category, String message, Object... args) {
+      this(category, format(message, args));
+    }
+
+    public KerberosDiagsFailure(String category, Throwable throwable,
+        String message, Object... args) {
+      this(category, message, args);
+      initCause(throwable);
+    }
+
+    public String getCategory() {
+      return category;
+    }
+  }
+}
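
A sketch of how the diagnostics might be driven from an entry point (not part
of the patch; the keytab path, principal, and minimum key length are
illustrative assumptions):

    // Run the full probe sequence; KerberosDiagsFailure exits with code 41.
    static void runDiags(Configuration conf) throws Exception {
      try (KerberosDiags diags = new KerberosDiags(
          conf,
          System.out,                      // print directly instead of logging
          Collections.<String>emptyList(), // no extra service names to probe
          new File("/etc/krb5.keytab"),    // or null to check the current login
          "service/host@EXAMPLE.COM",      // illustrative principal
          256,                             // minimum AES key length (JCE check)
          true)) {                         // fail if security is disabled
        diags.execute();
      }
    }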

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java
new file mode 100644
index 0000000..108ca22
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PatternValidator.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import java.util.regex.Pattern;
+
+/**
+ * Utility class to validate strings against a predefined pattern.
+ */
+public class PatternValidator {
+
+  public static final String E_INVALID_NAME =
+      "Invalid name %s does not match the pattern %s ";
+  private final Pattern valid;
+  private final String pattern;
+
+  public PatternValidator(String pattern) {
+    this.pattern = pattern;
+    valid = Pattern.compile(pattern);
+  }
+
+  /**
+   * Validate the name, restricting it to the set defined in the pattern.
+   * @param name name to validate
+   * @throws IllegalArgumentException if not a valid name
+   */
+  public void validate(String name) {
+    if (!matches(name)) {
+      throw new IllegalArgumentException(
+          String.format(E_INVALID_NAME, name, pattern));
+    }
+  }
+
+  /**
+   * Query to see if the pattern matches
+   * @param name name to validate
+   * @return true if the string matches the pattern
+   */
+  public boolean matches(String name) {
+    return valid.matcher(name).matches();
+  }
+}
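
A usage sketch (not part of the patch; the pattern string is an illustrative
assumption):

    // Restrict user-supplied names to a lowercase DNS-like alphabet.
    PatternValidator validator = new PatternValidator("[a-z][a-z0-9-]*");
    validator.validate("my-component");          // passes silently
    boolean ok = validator.matches("Bad Name");  // false: query form, no exception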

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java
new file mode 100644
index 0000000..2dbf37f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PortScanner.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.yarn.service.conf.SliderExitCodes;
+import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
+import org.apache.hadoop.yarn.service.exceptions.SliderException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * A scanner which can take an input string for a range, or scan the lot.
+ */
+public class PortScanner {
+  private static final Pattern NUMBER_RANGE = Pattern.compile("^(\\d+)\\s*-\\s*(\\d+)$");
+  private static final Pattern SINGLE_NUMBER = Pattern.compile("^\\d+$");
+
+  private List<Integer> remainingPortsToCheck;
+
+  public PortScanner() {
+  }
+
+  public void setPortRange(String input) throws BadConfigException {
+    // first split based on commas
+    Set<Integer> inputPorts = new TreeSet<Integer>();
+    String[] ranges = input.split(",");
+    for (String range : ranges) {
+      if (range.trim().isEmpty()) {
+        continue;
+      }
+      Matcher m = SINGLE_NUMBER.matcher(range.trim());
+      if (m.find()) {
+        inputPorts.add(Integer.parseInt(m.group()));
+        continue;
+      }
+      m = NUMBER_RANGE.matcher(range.trim());
+      if (m.find()) {
+        String[] boundaryValues = m.group(0).split("-");
+        int start = Integer.parseInt(boundaryValues[0].trim());
+        int end = Integer.parseInt(boundaryValues[1].trim());
+        if (end < start) {
+          throw new BadConfigException("End of port range is before start: "
+              + range + " in input: " + input);
+        }
+        for (int i = start; i < end + 1; i++) {
+          inputPorts.add(i);
+        }
+        continue;
+      }
+      throw new BadConfigException("Bad port range: " + range + " in input: "
+          + input);
+    }
+    if (inputPorts.size() == 0) {
+      throw new BadConfigException("No ports found in range: " + input);
+    }
+    this.remainingPortsToCheck = new ArrayList<Integer>(inputPorts);
+  }
+
+  public List<Integer> getRemainingPortsToCheck() {
+    return remainingPortsToCheck;
+  }
+
+  public int getAvailablePort() throws SliderException, IOException {
+    if (remainingPortsToCheck != null) {
+      return getAvailablePortViaPortArray();
+    } else {
+      return SliderUtils.getOpenPort();
+    }
+  }
+
+  private int getAvailablePortViaPortArray() throws SliderException {
+    boolean found = false;
+    int availablePort = -1;
+    Iterator<Integer> portsToCheck = this.remainingPortsToCheck.iterator();
+    while (portsToCheck.hasNext() && !found) {
+      int portToCheck = portsToCheck.next();
+      found = SliderUtils.isPortAvailable(portToCheck);
+      if (found) {
+        availablePort = portToCheck;
+        portsToCheck.remove();
+      }
+    }
+
+    if (availablePort < 0) {
+      throw new SliderException(SliderExitCodes.EXIT_BAD_CONFIGURATION,
+        "No available ports found in configured range %s",
+        remainingPortsToCheck);
+    }
+
+    return availablePort;
+  }
+}
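
A usage sketch (not part of the patch; the range string is an illustrative
assumption):

    // Pick a free port from a configured range; commas, single ports and
    // "low-high" ranges are all accepted by setPortRange().
    PortScanner scanner = new PortScanner();
    scanner.setPortRange("8600-8610, 8700"); // throws BadConfigException if malformed
    int port = scanner.getAvailablePort();   // removes the chosen port from the pool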

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java
new file mode 100644
index 0000000..9d00b3c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfiguration.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.api.records.ConfigFormat;
+import org.apache.hadoop.yarn.service.exceptions.BadConfigException;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.io.IOException;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * JSON-serializable description of a published key-val configuration.
+ * 
+ * The values themselves are not serialized in the external view; they have
+ * to be served up by the far end
+ */
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+public class PublishedConfiguration {
+
+  public String description;
+  public long updated;
+  
+  public String updatedTime;
+
+  public Map<String, String> entries = new HashMap<>();
+
+  public PublishedConfiguration() {
+  }
+
+  /**
+   * Build an empty published configuration
+   * @param description configuration description
+   */
+  public PublishedConfiguration(String description) {
+    this.description = description;
+  }
+
+  /**
+   * Build a configuration from the entries
+   * @param description configuration description
+   * @param entries entries to put
+   */
+  public PublishedConfiguration(String description,
+      Iterable<Map.Entry<String, String>> entries) {
+    this.description = description;
+    putValues(entries);
+  }
+
+  /**
+   * Build a published configuration, using the keys from keysource,
+   * but resolving the values from the value source, via Configuration.get()
+   * @param description configuration description
+   * @param keysource source of keys
+   * @param valuesource source of values
+   */
+  public PublishedConfiguration(String description,
+      Iterable<Map.Entry<String, String>> keysource,
+      Configuration valuesource) {
+    this.description = description;
+    putValues(ConfigHelper.resolveConfiguration(keysource, valuesource));
+  }
+
+  
+  /**
+   * Is the configuration empty? This means either that it has not
+   * been given any values, or it is a stripped-down copy sent over the
+   * wire.
+   * @return true if it is empty
+   */
+  public boolean isEmpty() {
+    return entries.isEmpty();
+  }
+
+
+  public void setUpdated(long updated) {
+    this.updated = updated;
+    this.updatedTime = new Date(updated).toString();
+  }
+
+  public long getUpdated() {
+    return updated;
+  }
+
+  /**
+   * Set the values from an iterable (this includes a Hadoop Configuration
+   * and Java properties object).
+   * Any existing value set is discarded
+   * @param entries entries to put
+   */
+  public void putValues(Iterable<Map.Entry<String, String>> entries) {
+    this.entries = new HashMap<String, String>();
+    for (Map.Entry<String, String> entry : entries) {
+      this.entries.put(entry.getKey(), entry.getValue());
+    }
+    
+  }
+
+  /**
+   * Convert to Hadoop XML
+   * @return the configuration as a Hadoop Configuration
+   */
+  public Configuration asConfiguration() {
+    Configuration conf = new Configuration(false);
+    try {
+      ConfigHelper.addConfigMap(conf, entries, "");
+    } catch (BadConfigException e) {
+      // triggered on a null value; switch to a runtime (and discard the stack)
+      throw new RuntimeException(e.toString());
+    }
+    return conf;
+  }
+  
+  public String asConfigurationXML() throws IOException {
+    return ConfigHelper.toXml(asConfiguration());
+  }
+
+  /**
+   * Convert values to properties
+   * @return a property file
+   */
+  public Properties asProperties() {
+    Properties props = new Properties();
+    props.putAll(entries);
+    return props;
+  }
+
+  /**
+   * Return the values as json string
+   * @return the JSON representation
+   * @throws IOException marshalling failure
+   */
+  public String asJson() throws IOException {
+    ObjectMapper mapper = new ObjectMapper();
+    mapper.configure(SerializationConfig.Feature.INDENT_OUTPUT, true);
+    String json = mapper.writeValueAsString(entries);
+    return json;
+  }
+
+
+  /**
+   * This makes a copy without the nested content -so is suitable
+   * for returning as part of the list of a parent's values
+   * @return the copy
+   */
+  public PublishedConfiguration shallowCopy() {
+    PublishedConfiguration that = new PublishedConfiguration();
+    that.description = this.description;
+    that.updated = this.updated;
+    that.updatedTime = this.updatedTime;
+    return that;
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb =
+        new StringBuilder("PublishedConfiguration{");
+    sb.append("description='").append(description).append('\'');
+    sb.append(" entries = ").append(entries.size());
+    sb.append('}');
+    return sb.toString();
+  }
+
+  /**
+   * Create an outputter for a given format
+   * @param format format to use
+   * @return an instance of output
+   */
+  public PublishedConfigurationOutputter createOutputter(ConfigFormat format) {
+    return PublishedConfigurationOutputter.createOutputter(format, this);
+  }
+}
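
A usage sketch (not part of the patch; the key, value and description are
illustrative assumptions):

    // A Hadoop Configuration iterates as Map.Entry<String,String> pairs,
    // so it can seed a PublishedConfiguration directly.
    Configuration source = new Configuration(false);
    source.set("fs.defaultFS", "hdfs://example:8020");
    PublishedConfiguration published =
        new PublishedConfiguration("core config", source);
    published.setUpdated(System.currentTimeMillis());
    String xml = published.asConfigurationXML(); // Hadoop XML rendering
    String json = published.asJson();            // entries as a JSON map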

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java
new file mode 100644
index 0000000..88ecf2c
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Preconditions;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.service.api.records.ConfigFormat;
+import org.yaml.snakeyaml.DumperOptions;
+import org.yaml.snakeyaml.DumperOptions.FlowStyle;
+import org.yaml.snakeyaml.Yaml;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.util.Properties;
+
+/**
+ * Output a published configuration
+ */
+public abstract class PublishedConfigurationOutputter {
+
+  private static final String COMMENTS = "Generated by Apache Slider";
+
+  protected final PublishedConfiguration owner;
+
+  protected PublishedConfigurationOutputter(PublishedConfiguration owner) {
+    this.owner = owner;
+  }
+
+  /**
+   * Save the config to a destination file, in the format of this outputter
+   * @param dest destination file
+   * @throws IOException
+   */
+/* JDK7
+  public void save(File dest) throws IOException {
+    try(FileOutputStream out = new FileOutputStream(dest)) {
+      save(out);
+      out.close();
+    }
+  }
+*/
+  public void save(File dest) throws IOException {
+    FileUtils.writeStringToFile(dest, asString(), Charsets.UTF_8);
+  }
+
+  /**
+   * Save the content. The default saves the asString() value
+   * to the output stream
+   * @param out output stream
+   * @throws IOException
+   */
+  public void save(OutputStream out) throws IOException {
+    IOUtils.write(asString(), out, Charsets.UTF_8);
+  }
+  /**
+   * Convert to a string
+   * @return the string form
+   * @throws IOException
+   */
+  public abstract String asString() throws IOException;
+
+  /**
+   * Create an outputter for the chosen format
+   * @param format format enumeration
+   * @param owner owning config
+   * @return the outputter
+   */
+
+  public static PublishedConfigurationOutputter createOutputter(ConfigFormat format,
+      PublishedConfiguration owner) {
+    Preconditions.checkNotNull(owner);
+    switch (format) {
+      case XML:
+      case HADOOP_XML:
+        return new XmlOutputter(owner);
+      case PROPERTIES:
+        return new PropertiesOutputter(owner);
+      case JSON:
+        return new JsonOutputter(owner);
+      case ENV:
+        return new EnvOutputter(owner);
+      case TEMPLATE:
+        return new TemplateOutputter(owner);
+      case YAML:
+        return new YamlOutputter(owner);
+      default:
+        throw new RuntimeException("Unsupported format :" + format);
+    }
+  }
+
+  public static class XmlOutputter extends PublishedConfigurationOutputter {
+
+
+    private final Configuration configuration;
+
+    public XmlOutputter(PublishedConfiguration owner) {
+      super(owner);
+      configuration = owner.asConfiguration();
+    }
+
+    @Override
+    public void save(OutputStream out) throws IOException {
+      configuration.writeXml(out);
+    }
+
+    @Override
+    public String asString() throws IOException {
+      return ConfigHelper.toXml(configuration);
+    }
+
+    public Configuration getConfiguration() {
+      return configuration;
+    }
+  }
+
+  public static class PropertiesOutputter extends PublishedConfigurationOutputter {
+
+    private final Properties properties;
+
+    public PropertiesOutputter(PublishedConfiguration owner) {
+      super(owner);
+      properties = owner.asProperties();
+    }
+
+    @Override
+    public void save(OutputStream out) throws IOException {
+      properties.store(out, COMMENTS);
+    }
+
+
+    public String asString() throws IOException {
+      StringWriter sw = new StringWriter();
+      properties.store(sw, COMMENTS);
+      return sw.toString();
+    }
+  }
+
+
+  public static class JsonOutputter extends PublishedConfigurationOutputter {
+
+    public JsonOutputter(PublishedConfiguration owner) {
+      super(owner);
+    }
+
+    @Override
+    public String asString() throws IOException {
+      return owner.asJson();
+    }
+  }
+
+
+  public static class EnvOutputter extends PublishedConfigurationOutputter {
+
+    public EnvOutputter(PublishedConfiguration owner) {
+      super(owner);
+    }
+
+    @Override
+    public String asString() throws IOException {
+      if (!owner.entries.containsKey("content")) {
+        throw new IOException("Configuration has no content field and cannot " 
+
+            "be retrieved as type 'env'");
+      }
+      String content = owner.entries.get("content");
+      return ConfigUtils.replaceProps(owner.entries, content);
+    }
+  }
+
+  public static class TemplateOutputter extends EnvOutputter {
+    public TemplateOutputter(PublishedConfiguration owner) {
+      super(owner);
+    }
+  }
+
+  public static class YamlOutputter extends PublishedConfigurationOutputter {
+
+    private final Yaml yaml;
+
+    public YamlOutputter(PublishedConfiguration owner) {
+      super(owner);
+      DumperOptions options = new DumperOptions();
+      options.setDefaultFlowStyle(FlowStyle.BLOCK);
+      yaml = new Yaml(options);
+    }
+
+    public String asString() throws IOException {
+      return yaml.dump(owner.entries);
+    }
+  }
+
+}
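
Continuing the PublishedConfiguration sketch from the previous file (not part
of the patch; the destination file is an illustrative assumption):

    // Render a published configuration in one of the supported formats.
    PublishedConfigurationOutputter outputter =
        published.createOutputter(ConfigFormat.PROPERTIES);
    String text = outputter.asString();               // in-memory rendering
    outputter.save(new File("/tmp/site.properties")); // or save(OutputStream)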

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java
new file mode 100644
index 0000000..140204a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SerializedApplicationReport.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.service.utils.ApplicationReportSerDeser;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
+import org.codehaus.jackson.map.annotate.JsonSerialize;
+
+import java.io.IOException;
+
+/**
+ * Serialized form of a service report which can be persisted
+ * and then parsed. It cannot be converted back into a
+ * real YARN service report.
+ *
+ * Useful for testing.
+ */
+
+@JsonIgnoreProperties(ignoreUnknown = true)
+@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
+
+public class SerializedApplicationReport {
+
+  public String applicationId;
+  public String applicationAttemptId;
+  public String name;
+  public String applicationType;
+  public String user;
+  public String queue;
+  public String host;
+  public Integer rpcPort;
+  public String state;
+  public String diagnostics;
+  public String url;
+  /**
+   * This value is non-null only when a report is generated from a
+   * submission context.
+   * The YARN {@link ApplicationReport} structure does not propagate this value
+   * from the RM.
+   */
+  public Long submitTime;
+  public Long startTime;
+  public Long finishTime;
+  public String finalStatus;
+  public String origTrackingUrl;
+  public Float progress;
+  
+  public SerializedApplicationReport() {
+  }
+  
+  public SerializedApplicationReport(ApplicationReport report) {
+    this.applicationId = report.getApplicationId().toString();
+    ApplicationAttemptId attemptId = report.getCurrentApplicationAttemptId();
+    this.applicationAttemptId =
+        attemptId != null ? attemptId.toString() : "N/A";
+    this.name = report.getName();
+    this.applicationType = report.getApplicationType();
+    this.user = report.getUser();
+    this.queue = report.getQueue();
+    this.host = report.getHost();
+    this.rpcPort = report.getRpcPort();
+    this.state = report.getYarnApplicationState().toString();
+    this.diagnostics = report.getDiagnostics();
+    this.startTime = report.getStartTime();
+    this.finishTime = report.getFinishTime();
+    FinalApplicationStatus appStatus = report.getFinalApplicationStatus();
+    this.finalStatus = appStatus == null ? "" : appStatus.toString();
+    this.progress = report.getProgress();
+    this.url = report.getTrackingUrl();
+    this.origTrackingUrl = report.getOriginalTrackingUrl();
+  }
+
+  @Override
+  public String toString() {
+    try {
+      return ApplicationReportSerDeser.toString(this);
+    } catch (IOException e) {
+      return super.toString();
+    }
+  }
+}
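
A short usage sketch (illustrative, based on the toString() override above,
which routes through ApplicationReportSerDeser): capture a live report in
serialized form, e.g. to persist it for tests. Because of
@JsonSerialize(include = NON_NULL), fields that were never set, such as
submitTime, are omitted from the JSON.

    // 'report' is an ApplicationReport previously fetched from a YarnClient;
    // the variable name is illustrative only.
    SerializedApplicationReport serialized =
        new SerializedApplicationReport(report);
    // JSON text; falls back to Object.toString() if serialization fails
    String json = serialized.toString();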

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
new file mode 100644
index 0000000..de82580
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
@@ -0,0 +1,446 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.service.api.records.Service;
+import org.apache.hadoop.yarn.service.api.records.Artifact;
+import org.apache.hadoop.yarn.service.api.records.Component;
+import org.apache.hadoop.yarn.service.api.records.Configuration;
+import org.apache.hadoop.yarn.service.api.records.Resource;
+import org.apache.hadoop.yarn.service.provider.AbstractClientProvider;
+import org.apache.hadoop.yarn.service.provider.ProviderFactory;
+import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
+import org.apache.hadoop.yarn.service.conf.RestApiConstants;
+import org.apache.hadoop.yarn.service.exceptions.RestApiErrorMessages;
+import org.codehaus.jackson.map.PropertyNamingStrategy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class ServiceApiUtil {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ServiceApiUtil.class);
+  public static JsonSerDeser<Service> jsonSerDeser =
+      new JsonSerDeser<>(Service.class,
+          PropertyNamingStrategy.CAMEL_CASE_TO_LOWER_CASE_WITH_UNDERSCORES);
+  private static final PatternValidator namePattern
+      = new PatternValidator("[a-z][a-z0-9-]*");
+
+  @VisibleForTesting
+  public static void setJsonSerDeser(JsonSerDeser jsd) {
+    jsonSerDeser = jsd;
+  }
+
+  @VisibleForTesting
+  public static void validateAndResolveService(Service service,
+      SliderFileSystem fs, org.apache.hadoop.conf.Configuration conf) throws
+      IOException {
+    boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED,
+        RegistryConstants.DEFAULT_DNS_ENABLED);
+    if (dnsEnabled && RegistryUtils.currentUser().length() > RegistryConstants
+        .MAX_FQDN_LABEL_LENGTH) {
+      throw new IllegalArgumentException(RestApiErrorMessages
+          .ERROR_USER_NAME_INVALID);
+    }
+    if (StringUtils.isEmpty(service.getName())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_APPLICATION_NAME_INVALID);
+    }
+
+    validateNameFormat(service.getName(), conf);
+
+    // If the service has no components do top-level checks
+    if (!hasComponent(service)) {
+      // If artifact is of type SERVICE, read other service components
+      if (service.getArtifact() != null && service.getArtifact()
+          .getType() == Artifact.TypeEnum.SERVICE) {
+        if (StringUtils.isEmpty(service.getArtifact().getId())) {
+          throw new IllegalArgumentException(
+              RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+        }
+        Service otherService = loadService(fs,
+            service.getArtifact().getId());
+        service.setComponents(otherService.getComponents());
+        service.setArtifact(null);
+        SliderUtils.mergeMapsIgnoreDuplicateKeys(service.getQuicklinks(),
+            otherService.getQuicklinks());
+      } else {
+        // Since it is a simple service with no components, create a default
+        // component
+        Component comp = createDefaultComponent(service);
+        validateComponent(comp, fs.getFileSystem(), conf);
+        service.getComponents().add(comp);
+        if (service.getLifetime() == null) {
+          service.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
+        }
+        return;
+      }
+    }
+
+    // Validate there are no component name collisions (collisions are not
+    // currently supported) and add any components from external services
+    // TODO allow name collisions? see AppState#roles
+    // TODO or add prefix to external component names?
+    Configuration globalConf = service.getConfiguration();
+    Set<String> componentNames = new HashSet<>();
+    List<Component> componentsToRemove = new ArrayList<>();
+    List<Component> componentsToAdd = new ArrayList<>();
+    for (Component comp : service.getComponents()) {
+      int maxCompLength = RegistryConstants.MAX_FQDN_LABEL_LENGTH;
+      maxCompLength = maxCompLength - Long.toString(Long.MAX_VALUE).length();
+      if (dnsEnabled && comp.getName().length() > maxCompLength) {
+        throw new IllegalArgumentException(String.format(RestApiErrorMessages
+            .ERROR_COMPONENT_NAME_INVALID, maxCompLength, comp.getName()));
+      }
+      if (componentNames.contains(comp.getName())) {
+        throw new IllegalArgumentException("Component name collision: " +
+            comp.getName());
+      }
+      // If artifact is of type SERVICE (which cannot be filled from
+      // global), read external service and add its components to this
+      // service
+      if (comp.getArtifact() != null && comp.getArtifact().getType() ==
+          Artifact.TypeEnum.SERVICE) {
+        if (StringUtils.isEmpty(comp.getArtifact().getId())) {
+          throw new IllegalArgumentException(
+              RestApiErrorMessages.ERROR_ARTIFACT_ID_INVALID);
+        }
+        LOG.info("Marking {} for removal", comp.getName());
+        componentsToRemove.add(comp);
+        List<Component> externalComponents = getComponents(fs,
+            comp.getArtifact().getId());
+        for (Component c : externalComponents) {
+          Component override = service.getComponent(c.getName());
+          if (override != null && override.getArtifact() == null) {
+            // allow properties from external components to be overridden /
+            // augmented by properties in this component, except for artifact
+            // which must be read from external component
+            override.mergeFrom(c);
+            LOG.info("Merging external component {} from external {}", c
+                .getName(), comp.getName());
+          } else {
+            if (componentNames.contains(c.getName())) {
+              throw new IllegalArgumentException("Component name collision: " +
+                  c.getName());
+            }
+            componentNames.add(c.getName());
+            componentsToAdd.add(c);
+            LOG.info("Adding component {} from external {}", c.getName(),
+                comp.getName());
+          }
+        }
+      } else {
+        // otherwise handle as a normal component
+        componentNames.add(comp.getName());
+        // configuration
+        comp.getConfiguration().mergeFrom(globalConf);
+      }
+    }
+    service.getComponents().removeAll(componentsToRemove);
+    service.getComponents().addAll(componentsToAdd);
+
+    // Validate components and let global values take effect if component level
+    // values are not provided
+    Artifact globalArtifact = service.getArtifact();
+    Resource globalResource = service.getResource();
+    Long globalNumberOfContainers = service.getNumberOfContainers();
+    String globalLaunchCommand = service.getLaunchCommand();
+    for (Component comp : service.getComponents()) {
+      // fill in global artifact unless it is type SERVICE
+      if (comp.getArtifact() == null && service.getArtifact() != null
+          && service.getArtifact().getType() != Artifact.TypeEnum
+          .SERVICE) {
+        comp.setArtifact(globalArtifact);
+      }
+      // fill in global resource
+      if (comp.getResource() == null) {
+        comp.setResource(globalResource);
+      }
+      // fill in global container count
+      if (comp.getNumberOfContainers() == null) {
+        comp.setNumberOfContainers(globalNumberOfContainers);
+      }
+      // fill in global launch command
+      if (comp.getLaunchCommand() == null) {
+        comp.setLaunchCommand(globalLaunchCommand);
+      }
+      // validate dependency existence
+      if (comp.getDependencies() != null) {
+        for (String dependency : comp.getDependencies()) {
+          if (!componentNames.contains(dependency)) {
+            throw new IllegalArgumentException(String.format(
+                RestApiErrorMessages.ERROR_DEPENDENCY_INVALID, dependency,
+                comp.getName()));
+          }
+        }
+      }
+      validateComponent(comp, fs.getFileSystem(), conf);
+    }
+
+    // validate dependency tree
+    sortByDependencies(service.getComponents());
+
+    // If no lifetime is specified, default to an unlimited lifetime
+    if (service.getLifetime() == null) {
+      service.setLifetime(RestApiConstants.DEFAULT_UNLIMITED_LIFETIME);
+    }
+  }
+
+  private static void validateComponent(Component comp, FileSystem fs,
+      org.apache.hadoop.conf.Configuration conf)
+      throws IOException {
+    validateNameFormat(comp.getName(), conf);
+
+    AbstractClientProvider compClientProvider = ProviderFactory
+        .getClientProvider(comp.getArtifact());
+    compClientProvider.validateArtifact(comp.getArtifact(), fs);
+
+    if (comp.getLaunchCommand() == null && (comp.getArtifact() == null || comp
+        .getArtifact().getType() != Artifact.TypeEnum.DOCKER)) {
+      throw new IllegalArgumentException(RestApiErrorMessages
+          .ERROR_ABSENT_LAUNCH_COMMAND);
+    }
+
+    validateServiceResource(comp.getResource(), comp);
+
+    if (comp.getNumberOfContainers() == null
+        || comp.getNumberOfContainers() < 0) {
+      throw new IllegalArgumentException(String.format(
+          RestApiErrorMessages.ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID
+              + ": " + comp.getNumberOfContainers(), comp.getName()));
+    }
+    compClientProvider.validateConfigFiles(comp.getConfiguration()
+        .getFiles(), fs);
+
+    MonitorUtils.getProbe(comp.getReadinessCheck());
+  }
+
+  // Validate that a component or service name matches the required format.
+  public static void validateNameFormat(String name,
+      org.apache.hadoop.conf.Configuration conf) {
+    if (StringUtils.isEmpty(name)) {
+      throw new IllegalArgumentException("Name can not be empty!");
+    }
+    // validate component name
+    if (name.contains("_")) {
+      throw new IllegalArgumentException(
+          "Invalid format: " + name
+              + ", can not use '_', as DNS hostname does not allow '_'. Use 
'-' Instead. ");
+    }
+    boolean dnsEnabled = conf.getBoolean(RegistryConstants.KEY_DNS_ENABLED,
+        RegistryConstants.DEFAULT_DNS_ENABLED);
+    if (dnsEnabled
+        && name.length() > RegistryConstants.MAX_FQDN_LABEL_LENGTH) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid format %s, must be no more than %d characters", name,
+          RegistryConstants.MAX_FQDN_LABEL_LENGTH));
+    }
+    namePattern.validate(name);
+  }
+
+  @VisibleForTesting
+  public static List<Component> getComponents(SliderFileSystem
+      fs, String serviceName) throws IOException {
+    return loadService(fs, serviceName).getComponents();
+  }
+
+  public static Service loadService(SliderFileSystem fs, String
+      serviceName) throws IOException {
+    Path serviceJson = getServiceJsonPath(fs, serviceName);
+    LOG.info("Loading service definition from " + serviceJson);
+    return jsonSerDeser.load(fs.getFileSystem(), serviceJson);
+  }
+
+  public static Service loadServiceFrom(SliderFileSystem fs,
+      Path appDefPath) throws IOException {
+    LOG.info("Loading service definition from " + appDefPath);
+    return jsonSerDeser.load(fs.getFileSystem(), appDefPath);
+  }
+
+  public static Path getServiceJsonPath(SliderFileSystem fs,
+      String serviceName) {
+    Path serviceDir = fs.buildClusterDirPath(serviceName);
+    return new Path(serviceDir, serviceName + ".json");
+  }
+
+  private static void validateServiceResource(Resource resource,
+      Component comp) {
+    // Only services/components of type SERVICE can skip resource requirement
+    if (resource == null) {
+      throw new IllegalArgumentException(
+          comp == null ? RestApiErrorMessages.ERROR_RESOURCE_INVALID : String
+              .format(RestApiErrorMessages.ERROR_RESOURCE_FOR_COMP_INVALID,
+                  comp.getName()));
+    }
+    // One and only one of profile OR cpus & memory can be specified.
+    // Specifying both raises a validation error.
+    if (StringUtils.isNotEmpty(resource.getProfile()) && (
+        resource.getCpus() != null || StringUtils
+            .isNotEmpty(resource.getMemory()))) {
+      throw new IllegalArgumentException(comp == null
+          ? RestApiErrorMessages
+              .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED
+          : String.format(RestApiErrorMessages
+              .ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED,
+              comp.getName()));
+    }
+    // Resource profiles are not supported yet, so raise a validation error
+    // if a resource profile is specified
+    if (StringUtils.isNotEmpty(resource.getProfile())) {
+      throw new IllegalArgumentException(
+          RestApiErrorMessages.ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET);
+    }
+
+    String memory = resource.getMemory();
+    Integer cpus = resource.getCpus();
+    if (StringUtils.isEmpty(memory)) {
+      throw new IllegalArgumentException(
+          comp == null ? RestApiErrorMessages.ERROR_RESOURCE_MEMORY_INVALID :
+              String.format(
+                  RestApiErrorMessages.ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID,
+                  comp.getName()));
+    }
+    if (cpus == null) {
+      throw new IllegalArgumentException(
+          comp == null ? RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID :
+              String.format(
+                  RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID,
+                  comp.getName()));
+    }
+    if (cpus <= 0) {
+      throw new IllegalArgumentException(comp == null ?
+          RestApiErrorMessages.ERROR_RESOURCE_CPUS_INVALID_RANGE : String
+          .format(
+              RestApiErrorMessages.ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE,
+              comp.getName()));
+    }
+  }
+
+  // check if comp mem size exceeds cluster limit
+  public static void validateCompResourceSize(
+      org.apache.hadoop.yarn.api.records.Resource maxResource,
+      Service service) throws YarnException {
+    for (Component component : service.getComponents()) {
+      // only handle mem now.
+      long mem = Long.parseLong(component.getResource().getMemory());
+      if (mem > maxResource.getMemorySize()) {
+        throw new YarnException(
+            "Component " + component.getName() + " memory size (" + mem
+                + ") is larger than configured max container memory size ("
+                + maxResource.getMemorySize() + ")");
+      }
+    }
+  }
+
+  public static boolean hasComponent(Service service) {
+    return service.getComponents() != null
+        && !service.getComponents().isEmpty();
+  }
+
+  public static Component createDefaultComponent(Service service) {
+    Component comp = new Component();
+    comp.setName(RestApiConstants.DEFAULT_COMPONENT_NAME);
+    comp.setArtifact(service.getArtifact());
+    comp.setResource(service.getResource());
+    comp.setNumberOfContainers(service.getNumberOfContainers());
+    comp.setLaunchCommand(service.getLaunchCommand());
+    comp.setConfiguration(service.getConfiguration());
+    return comp;
+  }
+
+  public static Collection<Component> sortByDependencies(List<Component>
+      components) {
+    Map<String, Component> sortedComponents =
+        sortByDependencies(components, null);
+    return sortedComponents.values();
+  }
+
+  /**
+   * Each internal call of sortByDependencies will identify all of the
+   * components with the same dependency depth (the lowest depth that has not
+   * been processed yet) and add them to the sortedComponents list, preserving
+   * their original ordering in the components list.
+   *
+   * So the first time it is called, all components with no dependencies
+   * (depth 0) will be identified. The next time it is called, all components
+   * that have dependencies only on the depth 0 components will be
+   * identified (depth 1). This will be repeated until all components have
+   * been added to the sortedComponents list. If no new components are
+   * identified but the sortedComponents list is not complete, an error is
+   * thrown.
+   */
+  private static Map<String, Component> sortByDependencies(List<Component>
+      components, Map<String, Component> sortedComponents) {
+    if (sortedComponents == null) {
+      sortedComponents = new LinkedHashMap<>();
+    }
+
+    Map<String, Component> componentsToAdd = new LinkedHashMap<>();
+    List<Component> componentsSkipped = new ArrayList<>();
+    for (Component component : components) {
+      String name = component.getName();
+      if (sortedComponents.containsKey(name)) {
+        continue;
+      }
+      boolean dependenciesAlreadySorted = true;
+      if (!SliderUtils.isEmpty(component.getDependencies())) {
+        for (String dependency : component.getDependencies()) {
+          if (!sortedComponents.containsKey(dependency)) {
+            dependenciesAlreadySorted = false;
+            break;
+          }
+        }
+      }
+      if (dependenciesAlreadySorted) {
+        componentsToAdd.put(name, component);
+      } else {
+        componentsSkipped.add(component);
+      }
+    }
+
+    if (componentsToAdd.size() == 0) {
+      throw new IllegalArgumentException(String.format(RestApiErrorMessages
+          .ERROR_DEPENDENCY_CYCLE, componentsSkipped));
+    }
+    sortedComponents.putAll(componentsToAdd);
+    if (sortedComponents.size() == components.size()) {
+      return sortedComponents;
+    }
+    return sortByDependencies(components, sortedComponents);
+  }
+
+  public static String $(String s) {
+    return "${" + s +"}";
+  }
+}
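
To make the sortByDependencies contract concrete, a small sketch (not part of
this patch; it assumes the usual bean-style setters on Component): with web
depending on db and db depending on zk, the result iterates in the order zk,
db, web regardless of input order.

    Component zk = new Component();
    zk.setName("zk");                                    // depth 0: no dependencies
    Component db = new Component();
    db.setName("db");
    db.setDependencies(java.util.Arrays.asList("zk"));   // depth 1
    Component web = new Component();
    web.setName("web");
    web.setDependencies(java.util.Arrays.asList("db"));  // depth 2

    Collection<Component> ordered = ServiceApiUtil.sortByDependencies(
        java.util.Arrays.asList(web, db, zk));           // iterates: zk, db, web
    // A cycle (e.g. zk also depending on web) would leave componentsToAdd
    // empty and raise IllegalArgumentException(ERROR_DEPENDENCY_CYCLE).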

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
new file mode 100644
index 0000000..7440b11
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+
+
+public class ServiceRegistryUtils {
+
+  /**
+   * Base path for services
+   */
+  public static final String ZK_SERVICES = "services";
+
+  /**
+   * Base path for all Slider references
+   */
+  public static final String ZK_SLIDER = "slider";
+  public static final String ZK_USERS = "users";
+  public static final String SVC_SLIDER = "/" + ZK_SERVICES + "/" + ZK_SLIDER;
+  public static final String SVC_SLIDER_USERS = SVC_SLIDER + "/" + ZK_USERS;
+
+  /**
+   * Get the registry path for an instance under the user's home node
+   * @param instanceName application instance
+   * @return a path to the registry location for this application instance.
+   */
+  public static String registryPathForInstance(String instanceName) {
+    return RegistryUtils.servicePath(
+        RegistryUtils.currentUser(), YarnServiceConstants.APP_TYPE,
+        instanceName);
+  }
+
+  /**
+   * Build the path to a cluster; it exists once the cluster has come up.
+   * Even before that, a ZK watcher could wait for it.
+   * @param username user
+   * @param clustername name of the cluster
+   * @return a string
+   */
+  public static String mkClusterPath(String username, String clustername) {
+    return mkSliderUserPath(username) + "/" + clustername;
+  }
+
+  /**
+   * Build the path to a user's Slider root; all of that user's clusters
+   * are registered underneath it.
+   * @param username user
+   * @return a string
+   */
+  public static String mkSliderUserPath(String username) {
+    return SVC_SLIDER_USERS + "/" + username;
+  }
+}
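
Concretely, given the constants above, the helpers expand as follows
(sketch; "alice" and "hbase1" are illustrative names):

    // "/services/slider/users/alice"
    String userPath = ServiceRegistryUtils.mkSliderUserPath("alice");
    // "/services/slider/users/alice/hbase1"
    String clusterPath = ServiceRegistryUtils.mkClusterPath("alice", "hbase1");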

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ccee80a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java
new file mode 100644
index 0000000..d6d664e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/SliderFileSystem.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.service.utils;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import java.io.IOException;
+
+/**
+ * Extends CoreFileSystem with operations to manipulate ClusterDescription
+ * persistent state.
+ */
+public class SliderFileSystem extends CoreFileSystem {
+
+  Path appDir = null;
+
+  public SliderFileSystem(FileSystem fileSystem,
+      Configuration configuration) {
+    super(fileSystem, configuration);
+  }
+
+  public SliderFileSystem(Configuration configuration) throws IOException {
+    super(configuration);
+  }
+
+  public void setAppDir(Path appDir) {
+    this.appDir = appDir;
+  }
+
+  public Path getAppDir() {
+    return this.appDir;
+  }
+}
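
A small end-to-end sketch of how these utilities fit together (illustrative
only; "sleep-service" is a hypothetical service name): a SliderFileSystem
locates the persisted JSON definition, which ServiceApiUtil loads via its
JsonSerDeser.

    org.apache.hadoop.conf.Configuration conf =
        new org.apache.hadoop.conf.Configuration();
    SliderFileSystem sfs = new SliderFileSystem(conf);
    // resolves to <cluster dir>/sleep-service/sleep-service.json
    Path serviceJson = ServiceApiUtil.getServiceJsonPath(sfs, "sleep-service");
    Service service = ServiceApiUtil.loadService(sfs, "sleep-service");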

