http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java deleted file mode 100644 index 9c653f3..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowCompositeService.java +++ /dev/null @@ -1,167 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.server.services.workflow; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.service.CompositeService; -import org.apache.hadoop.service.Service; -import org.apache.hadoop.service.ServiceStateChangeListener; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.List; - -/** - * An extended composite service which stops itself if any child service - * fails, or when all its children have successfully stopped without failure. - * - * Lifecycle - * <ol> - * <li>If any child exits with a failure: this service stops, propagating - * the exception.</li> - * <li>When all child services has stopped, this service stops itself</li> - * </ol> - * - */ -public class WorkflowCompositeService extends CompositeService - implements ServiceParent, ServiceStateChangeListener { - - private static final Logger LOG = - LoggerFactory.getLogger(WorkflowCompositeService.class); - - /** - * Deadlock-avoiding overridden config for slider services; see SLIDER-1052 - */ - private volatile Configuration configuration; - - /** - * Construct an instance - * @param name name of this service instance - */ - public WorkflowCompositeService(String name) { - super(name); - } - - @Override - public Configuration getConfig() { - return configuration; - } - - @Override - protected void setConfig(Configuration conf) { - super.setConfig(conf); - configuration = conf; - } - - /** - * Construct an instance with the default name. 
- */ - public WorkflowCompositeService() { - this("WorkflowCompositeService"); - } - - /** - * Varargs constructor - * @param name name of this service instance - * @param children children - */ - public WorkflowCompositeService(String name, Service... children) { - this(name); - for (Service child : children) { - addService(child); - } - } - - /** - * Construct with a list of children - * @param name name of this service instance - * @param children children to add - */ - public WorkflowCompositeService(String name, List<Service> children) { - this(name); - for (Service child : children) { - addService(child); - } - } - - /** - * Add a service, and register it - * @param service the {@link Service} to be added. - * Important: do not add a service to a parent during your own serviceInit/start, - * in Hadoop 2.2; you will trigger a ConcurrentModificationException. - */ - @Override - public synchronized void addService(Service service) { - Preconditions.checkArgument(service != null, "null service argument"); - service.registerServiceListener(this); - super.addService(service); - } - - /** - * When this service is started, any service stopping with a failure - * exception is converted immediately into a failure of this service, - * storing the failure and stopping ourselves. - * @param child the service that has changed. - */ - @Override - public void stateChanged(Service child) { - //if that child stopped while we are running: - if (isInState(STATE.STARTED) && child.isInState(STATE.STOPPED)) { - // a child service has stopped - //did the child fail? if so: propagate - Throwable failureCause = child.getFailureCause(); - if (failureCause != null) { - LOG.info("Child service " + child + " failed", failureCause); - //failure. Convert to an exception - Exception e = (failureCause instanceof Exception) ? - (Exception) failureCause : new Exception(failureCause); - //flip ourselves into the failed state - noteFailure(e); - stop(); - } else { - LOG.info("Child service completed {}", child); - if (areAllChildrenStopped()) { - LOG.info("All children are halted: stopping"); - stop(); - } - } - } - } - - /** - * Probe to query if all children are stopped -simply - * by taking a snapshot of the child service list and enumerating - * their state. - * The state of the children may change during this operation -that will - * not get picked up. - * @return true if all the children are stopped. - */ - private boolean areAllChildrenStopped() { - List<Service> children = getServices(); - boolean stopped = true; - for (Service child : children) { - if (!child.isInState(STATE.STOPPED)) { - stopped = false; - break; - } - } - return stopped; - } -}
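
For readers skimming the deleted class above, here is a minimal usage sketch of WorkflowCompositeService. It is not part of this commit: the demo class and its NoopChild helper are hypothetical, written only to illustrate the documented lifecycle (the parent stops as soon as any child fails, or once all of its children have stopped cleanly).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.Service;
import org.apache.slider.server.services.workflow.WorkflowCompositeService;

public class WorkflowCompositeServiceDemo {

  /** Trivial child service, used only for illustration. */
  static class NoopChild extends AbstractService {
    NoopChild(String name) {
      super(name);
    }
  }

  public static void main(String[] args) {
    // addService() (called by the varargs constructor) registers the parent
    // as a state-change listener on each child, so child stops are observed.
    WorkflowCompositeService parent = new WorkflowCompositeService(
        "demo", new NoopChild("child-1"), new NoopChild("child-2"));
    parent.init(new Configuration());
    parent.start();

    // A child that stops with a failure cause stops the parent immediately;
    // here both children stop cleanly, so the parent stops itself only after
    // the last child has halted.
    for (Service child : parent.getServices()) {
      child.stop();
    }
  }
}
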
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowExecutorService.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowExecutorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowExecutorService.java deleted file mode 100644 index 7409d32..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowExecutorService.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.server.services.workflow; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.service.AbstractService; - -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; - -/** - * A service that hosts an executor -when the service is stopped, - * {@link ExecutorService#shutdownNow()} is invoked. - */ -public class WorkflowExecutorService<E extends ExecutorService> extends AbstractService { - - private E executor; - - /** - * Construct an instance with the given name -but - * no executor - * @param name service name - */ - public WorkflowExecutorService(String name) { - this(name, null); - } - - /** - * Construct an instance with the given name and executor - * @param name service name - * @param executor exectuor - */ - public WorkflowExecutorService(String name, - E executor) { - super(name); - this.executor = executor; - } - - /** - * Get the executor - * @return the executor - */ - public synchronized E getExecutor() { - return executor; - } - - /** - * Set the executor. 
Only valid if the current one is null - * @param executor executor - */ - public synchronized void setExecutor(E executor) { - Preconditions.checkState(this.executor == null, - "Executor already set"); - this.executor = executor; - } - - /** - * Execute the runnable with the executor (which - * must have been created already) - * @param runnable runnable to execute - */ - public void execute(Runnable runnable) { - getExecutor().execute(runnable); - } - - /** - * Submit a callable - * @param callable callable - * @param <V> type of the final get - * @return a future to wait on - */ - public <V> Future<V> submit(Callable<V> callable) { - return getExecutor().submit(callable); - } - - /** - * Stop the service: halt the executor. - * @throws Exception exception. - */ - @Override - protected void serviceStop() throws Exception { - stopExecutor(); - super.serviceStop(); - } - - /** - * Stop the executor if it is not null. - * This uses {@link ExecutorService#shutdownNow()} - * and so does not block until they have completed. - */ - protected synchronized void stopExecutor() { - if (executor != null) { - executor.shutdownNow(); - } - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java deleted file mode 100644 index b71530f..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowRpcService.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.server.services.workflow; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.service.AbstractService; - -import java.net.InetSocketAddress; - -/** - * A YARN service that maps the start/stop lifecycle of an RPC server - * to the YARN service lifecycle. 
- */ -public class WorkflowRpcService extends AbstractService { - - /** RPC server*/ - private final Server server; - - /** - * Construct an instance - * @param name service name - * @param server service to stop - */ - public WorkflowRpcService(String name, Server server) { - super(name); - Preconditions.checkArgument(server != null, "Null server"); - this.server = server; - } - - /** - * Get the server - * @return the server - */ - public Server getServer() { - return server; - } - - /** - * Get the socket address of this server - * @return the address this server is listening on - */ - public InetSocketAddress getConnectAddress() { - return NetUtils.getConnectAddress(server); - } - - @Override - protected void serviceStart() throws Exception { - super.serviceStart(); - server.start(); - } - - @Override - protected void serviceStop() throws Exception { - if (server != null) { - server.stop(); - } - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowScheduledExecutorService.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowScheduledExecutorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowScheduledExecutorService.java deleted file mode 100644 index e9f53ed..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowScheduledExecutorService.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.slider.server.services.workflow; - -import java.util.concurrent.ScheduledExecutorService; - -/** - * Scheduled executor or subclass thereof - * @param <E> scheduled executor service type - */ -public class WorkflowScheduledExecutorService<E extends ScheduledExecutorService> - extends WorkflowExecutorService<E> { - - public WorkflowScheduledExecutorService(String name) { - super(name); - } - - public WorkflowScheduledExecutorService(String name, - E executor) { - super(name, executor); - } -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java deleted file mode 100644 index 97f97e8..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/WorkflowSequenceService.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.server.services.workflow; - -import com.google.common.base.Preconditions; -import org.apache.hadoop.service.AbstractService; -import org.apache.hadoop.service.Service; -import org.apache.hadoop.service.ServiceStateChangeListener; -import org.apache.hadoop.service.ServiceStateException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * This resembles the YARN CompositeService, except that it - * starts one service after another - * - * Workflow - * <ol> - * <li>When the <code>WorkflowSequenceService</code> instance is - * initialized, it only initializes itself.</li> - * - * <li>When the <code>WorkflowSequenceService</code> instance is - * started, it initializes then starts the first of its children. - * If there are no children, it immediately stops.</li> - * - * <li>When the active child stops, it did not fail, and the parent has not - * stopped -then the next service is initialized and started. 
If there is no - * remaining child the parent service stops.</li> - * - * <li>If the active child did fail, the parent service notes the exception - * and stops -effectively propagating up the failure. - * </li> - * </ol> - * - * New service instances MAY be added to a running instance -but no guarantees - * can be made as to whether or not they will be run. - */ - -public class WorkflowSequenceService extends AbstractService implements - ServiceParent, ServiceStateChangeListener { - - private static final Logger LOG = - LoggerFactory.getLogger(WorkflowSequenceService.class); - - /** - * list of services - */ - private final List<Service> serviceList = new ArrayList<>(); - - /** - * The currently active service. - * Volatile -may change & so should be read into a - * local variable before working with - */ - private volatile Service activeService; - - /** - the previous service -the last one that finished. - null if one did not finish yet - */ - private volatile Service previousService; - - private boolean stopIfNoChildServicesAtStartup = true; - - /** - * Construct an instance - * @param name service name - */ - public WorkflowSequenceService(String name) { - super(name); - } - - /** - * Construct an instance with the default name - */ - public WorkflowSequenceService() { - this("WorkflowSequenceService"); - } - - /** - * Create a service sequence with the given list of services - * @param name service name - * @param children initial sequence - */ - public WorkflowSequenceService(String name, Service... children) { - super(name); - for (Service service : children) { - addService(service); - } - } /** - * Create a service sequence with the given list of services - * @param name service name - * @param children initial sequence - */ - public WorkflowSequenceService(String name, List<Service> children) { - super(name); - for (Service service : children) { - addService(service); - } - } - - /** - * Get the current service -which may be null - * @return service running - */ - public Service getActiveService() { - return activeService; - } - - /** - * Get the previously active service - * @return the service last run, or null if there is none. - */ - public Service getPreviousService() { - return previousService; - } - - protected void setStopIfNoChildServicesAtStartup(boolean stopIfNoChildServicesAtStartup) { - this.stopIfNoChildServicesAtStartup = stopIfNoChildServicesAtStartup; - } - - /** - * When started - * @throws Exception - */ - @Override - protected void serviceStart() throws Exception { - if (!startNextService() && stopIfNoChildServicesAtStartup) { - //nothing to start -so stop - stop(); - } - } - - @Override - protected void serviceStop() throws Exception { - //stop current service. - //this triggers a callback that is caught and ignored - Service current = activeService; - previousService = current; - activeService = null; - if (current != null) { - current.stop(); - } - } - - /** - * Start the next service in the list. 
- * Return false if there are no more services to run, or this - * service has stopped - * @return true if a service was started - * @throws RuntimeException from any init or start failure - * @throws ServiceStateException if this call is made before - * the service is started - */ - public synchronized boolean startNextService() { - if (isInState(STATE.STOPPED)) { - //downgrade to a failed - LOG.debug("Not starting next service -{} is stopped", this); - return false; - } - if (!isInState(STATE.STARTED)) { - //reject attempts to start a service too early - throw new ServiceStateException( - "Cannot start a child service when not started"); - } - if (serviceList.isEmpty()) { - //nothing left to run - return false; - } - if (activeService != null && activeService.getFailureCause() != null) { - //did the last service fail? Is this caused by some premature callback? - LOG.debug("Not starting next service due to a failure of {}", - activeService); - return false; - } - //bear in mind that init & start can fail, which - //can trigger re-entrant calls into the state change listener. - //by setting the current service to null - //the start-next-service logic is skipped. - //now, what does that mean w.r.t exit states? - - activeService = null; - Service head = serviceList.remove(0); - - try { - head.init(getConfig()); - head.registerServiceListener(this); - head.start(); - } catch (RuntimeException e) { - noteFailure(e); - throw e; - } - //at this point the service must have explicitly started & not failed, - //else an exception would have been raised - activeService = head; - return true; - } - - /** - * State change event relays service stop events to - * {@link #onServiceCompleted(Service)}. Subclasses can - * extend that with extra logic - * @param service the service that has changed. - */ - @Override - public void stateChanged(Service service) { - // only react to the state change when it is the current service - // and it has entered the STOPPED state - if (service == activeService && service.isInState(STATE.STOPPED)) { - onServiceCompleted(service); - } - } - - /** - * handler for service completion: base class starts the next service - * @param service service that has completed - */ - protected synchronized void onServiceCompleted(Service service) { - LOG.info("Running service stopped: {}", service); - previousService = activeService; - - - //start the next service if we are not stopped ourselves - if (isInState(STATE.STARTED)) { - - //did the service fail? if so: propagate - Throwable failureCause = service.getFailureCause(); - if (failureCause != null) { - Exception e = (failureCause instanceof Exception) ? 
- (Exception) failureCause : new Exception(failureCause); - noteFailure(e); - stop(); - } - - //start the next service - boolean started; - try { - started = startNextService(); - } catch (Exception e) { - //something went wrong here - noteFailure(e); - started = false; - } - if (!started) { - //no start because list is empty - //stop and expect the notification to go upstream - stop(); - } - } else { - //not started, so just note that the current service - //has gone away - activeService = null; - } - } - - /** - * Add the passed {@link Service} to the list of services managed by this - * {@link WorkflowSequenceService} - * @param service the {@link Service} to be added - */ - @Override - public synchronized void addService(Service service) { - Preconditions.checkArgument(service != null, "null service argument"); - LOG.debug("Adding service {} ", service.getName()); - synchronized (serviceList) { - serviceList.add(service); - } - } - - /** - * Get an unmodifiable list of services - * @return a list of child services at the time of invocation - - * added services will not be picked up. - */ - @Override //Parent - public synchronized List<Service> getServices() { - return Collections.unmodifiableList(serviceList); - } - - @Override // Object - public synchronized String toString() { - return super.toString() + "; current service " + activeService - + "; queued service count=" + serviceList.size(); - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java deleted file mode 100644 index 36d059a..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/workflow/package-info.java +++ /dev/null @@ -1,172 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.server.services.workflow; - -/** - -<p> - This package contains classes which can be aggregated to build up - complex workflows of services: sequences of operations, callbacks - and composite services with a shared lifespan. 
- </p> - -<h2> - Core concepts: -</h2> - - -<p> -The Workflow Services are a set of Hadoop YARN services, all implementing -the {@link org.apache.hadoop.service.Service} API. -They are designed to be aggregated, to be composed to produce larger -composite services which then perform ordered operations, notify other services -when work has completed, and to propagate failure up the service hierarchy. -</p> -<p> -Service instances may have a limited lifespan, and may self-terminate when -they consider it appropriate.</p> -<p> -Workflow Services that have children implement the -{@link org.apache.slider.server.services.workflow.ServiceParent} -class, which provides (thread-safe) access to the children -allowing new children -to be added, and existing children to be enumerated. They implement policies -on how to react to the termination of children -so they can sequence operations -which terminate themselves when complete. -</p> - -<p> -Workflow Services may be subclassed to extend their behavior, or to use them -in specific applications. Just as the standard -{@link org.apache.hadoop.service.CompositeService} -is often subclassed to aggregate child services, the -{@link org.apache.slider.server.services.workflow.WorkflowCompositeService} -can be used instead -adding the feature that failing services trigger automatic -parent shutdown. If that is the desired operational mode of a class, -swapping the composite service implementation may be sufficient to adopt it. -</p> - - -<h2> How do the workflow services differ from the standard YARN services? </h2> - - <p> - - There is exactly one standard YARN service for managing children, the - {@link org.apache.hadoop.service.CompositeService}. - </p><p> - The {@link org.apache.slider.server.services.workflow.WorkflowCompositeService} - shares the same model of "child services, all inited and started together". - Where it differs is that if any child service stops -either due to a failure - or to an action which invokes that service's - {@link org.apache.hadoop.service.Service#stop()} method -then the parent - service reacts: a child failure stops the parent immediately, and once all - children have stopped the parent stops itself. - </p> - <p> - -In contrast, the original <code>CompositeService</code> class starts its children -in its {@link org.apache.hadoop.service.Service#start()} method, but does not -listen or react to any child service halting. As a result, changes in child -state are not automatically detected or propagated, other than failures in -the actual init() and start() methods. -</p> - -<p> -If a child service runs until completed -that is, it will not be stopped until -instructed to do so, and if it is only the parent service that attempts to -stop the child, then this difference is unimportant. -</p> -<p> - -However, if a service depends upon all its child services running - -and if those child services are written so as to stop when they fail, using -the <code>WorkflowCompositeService</code> as a base class will enable the -parent service to be automatically notified of a child stopping. - -</p> -<p> -The {@link org.apache.slider.server.services.workflow.WorkflowSequenceService} -resembles the composite service in API, but its workflow is different. It -initializes and starts its children one-by-one, only starting the second after -the first one succeeds, the third after the second, etc. If any service in -the sequence fails, the parent <code>WorkflowSequenceService</code> stops, -reporting the same exception. -</p> - -<p> -The {@link org.apache.slider.server.services.workflow.ForkedProcessService}: -Executes a process when started, and binds to the life of that process.
When the -process terminates, so does the service -and vice versa. This service enables -external processes to be executed as part of a sequence of operations -or, -using the {@link org.apache.slider.server.services.workflow.WorkflowCompositeService} -in parallel with other services, terminating the process when the other services -stop -and vice versa. -</p> - -<p> -The {@link org.apache.slider.server.services.workflow.WorkflowCallbackService} -executes a {@link java.util.concurrent.Callable} callback a specified delay -after the service is started, then potentially terminates itself. -This is useful for callbacks when a workflow reaches a specific point --or simply for executing arbitrary code in the workflow. - - </p> - - -<h2> -Other Workflow Services -</h2> - -There are some minor services that have proven useful within aggregate workflows, -and simply in applications which are built from composite YARN services. - - <ul> - <li>{@link org.apache.slider.server.services.workflow.WorkflowRpcService }: - Maintains a reference to an RPC {@link org.apache.hadoop.ipc.Server} instance. - When the service is started, so is the RPC server. Similarly, when the service - is stopped, so is the RPC server instance. - </li> - - <li>{@link org.apache.slider.server.services.workflow.ClosingService}: Closes - an instance of {@link java.io.Closeable} when the service is stopped. This - is purely a housekeeping class. - </li> - - </ul> - - Lower-level classes - <ul> - <li>{@link org.apache.slider.server.services.workflow.ServiceTerminatingRunnable }: - A {@link java.lang.Runnable} which runs the runnable supplied in its constructor - then signals its owning service to stop once that runnable is completed. - Any exception raised in the run is stored. - </li> - <li>{@link org.apache.slider.server.services.workflow.WorkflowExecutorService}: - A base class for services that wish to have a {@link java.util.concurrent.ExecutorService} - with a lifespan mapped to that of a service. When the service is stopped, the - {@link java.util.concurrent.ExecutorService#shutdownNow()} method is called to - attempt to shut down all running tasks. - </li> - <li>{@link org.apache.slider.server.services.workflow.ServiceThreadFactory}: - This is a simple {@link java.util.concurrent.ThreadFactory} which generates - meaningful thread names. 
It can be used as a parameter to constructors of - {@link java.util.concurrent.ExecutorService} instances, to ensure that - log information can tie back text to the related services</li> - </ul> - - - - */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java deleted file mode 100644 index 76ce7a5..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/server/services/yarnregistry/YarnRegistryViewForProviders.java +++ /dev/null @@ -1,226 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.server.services.yarnregistry; - -import com.google.common.base.Preconditions; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.PathNotFoundException; -import org.apache.hadoop.registry.client.api.RegistryConstants; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.registry.client.api.BindFlags; -import org.apache.hadoop.registry.client.api.RegistryOperations; -import org.apache.hadoop.registry.client.binding.RegistryUtils; -import org.apache.hadoop.registry.client.binding.RegistryPathUtils; - -import org.apache.hadoop.registry.client.types.ServiceRecord; -import org.apache.hadoop.yarn.service.compinstance.ComponentInstance; -import org.apache.hadoop.yarn.service.compinstance.ComponentInstanceId; -import org.apache.slider.common.tools.SliderUtils; - -import java.io.IOException; -import java.util.List; - -import static org.apache.hadoop.registry.client.binding.RegistryPathUtils.join; - -/** - * Registry view for providers. This tracks where the service - * is registered, offers access to the record and other things. 
- */ -public class YarnRegistryViewForProviders { - private static final Log LOG = - LogFactory.getLog(YarnRegistryViewForProviders.class); - - private final RegistryOperations registryOperations; - private final String user; - private final String sliderServiceClass; - private final String instanceName; - /** - * Record used where the service registered itself. - * Null until the service is registered - */ - private ServiceRecord selfRegistration; - - /** - * Path where record was registered - * Null until the service is registered - */ - private String selfRegistrationPath; - - public YarnRegistryViewForProviders(RegistryOperations registryOperations, - String user, - String sliderServiceClass, - String instanceName, - ApplicationAttemptId applicationAttemptId) { - Preconditions.checkArgument(registryOperations != null, - "null registry operations"); - Preconditions.checkArgument(user != null, "null user"); - Preconditions.checkArgument(SliderUtils.isSet(sliderServiceClass), - "unset service class"); - Preconditions.checkArgument(SliderUtils.isSet(instanceName), - "instanceName"); - Preconditions.checkArgument(applicationAttemptId != null, - "null applicationAttemptId"); - this.registryOperations = registryOperations; - this.user = user; - this.sliderServiceClass = sliderServiceClass; - this.instanceName = instanceName; - } - - public String getUser() { - return user; - } - - - private void setSelfRegistration(ServiceRecord selfRegistration) { - this.selfRegistration = selfRegistration; - } - - /** - * Get the path to where the service has registered itself. - * Null until the service is registered - * @return the service registration path. - */ - public String getSelfRegistrationPath() { - return selfRegistrationPath; - } - - /** - * Get the absolute path to where the service has registered itself. - * This includes the base registry path - * Null until the service is registered - * @return the service registration path. - */ - public String getAbsoluteSelfRegistrationPath() { - if (selfRegistrationPath == null) { - return null; - } - String root = registryOperations.getConfig().getTrimmed( - RegistryConstants.KEY_REGISTRY_ZK_ROOT, - RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); - return RegistryPathUtils.join(root, selfRegistrationPath); - } - - /** - * Add a component under the slider name/entry - * @param componentName component name - * @param record record to put - * @throws IOException - */ - public void putComponent(String componentName, - ServiceRecord record) throws - IOException { - putComponent(sliderServiceClass, instanceName, - componentName, - record); - } - - /** - * Add a component - * @param serviceClass service class to use under ~user - * @param componentName component name - * @param record record to put - * @throws IOException - */ - public void putComponent(String serviceClass, - String serviceName, - String componentName, - ServiceRecord record) throws IOException { - String path = RegistryUtils.componentPath( - user, serviceClass, serviceName, componentName); - registryOperations.mknode(RegistryPathUtils.parentOf(path), true); - registryOperations.bind(path, record, BindFlags.OVERWRITE); - } - - /** - * Add a service under a path, optionally purging any history - * @param username user - * @param serviceClass service class to use under ~user - * @param serviceName name of the service - * @param record service record - * @param deleteTreeFirst perform recursive delete of the path first. 
- * @return the path the service was created at - * @throws IOException - */ - public String putService(String username, - String serviceClass, - String serviceName, - ServiceRecord record, - boolean deleteTreeFirst) throws IOException { - String path = RegistryUtils.servicePath( - username, serviceClass, serviceName); - if (deleteTreeFirst) { - registryOperations.delete(path, true); - } - registryOperations.mknode(RegistryPathUtils.parentOf(path), true); - registryOperations.bind(path, record, BindFlags.OVERWRITE); - return path; - } - - /** - * Add a service under a path for the current user - * @param record service record - * @param deleteTreeFirst perform recursive delete of the path first - * @return the path the service was created at - * @throws IOException - */ - public String registerSelf( - ServiceRecord record, - boolean deleteTreeFirst) throws IOException { - selfRegistrationPath = - putService(user, sliderServiceClass, instanceName, record, deleteTreeFirst); - setSelfRegistration(record); - return selfRegistrationPath; - } - - /** - * Delete a component - * @param containerId component name - * @throws IOException - */ - public void deleteComponent(ComponentInstanceId instanceId, - String containerId) throws IOException { - String path = RegistryUtils.componentPath( - user, sliderServiceClass, instanceName, - containerId); - LOG.info(instanceId + ": Deleting registry path " + path); - registryOperations.delete(path, false); - } - - /** - * Delete the children of a path -but not the path itself. - * It is not an error if the path does not exist - * @param path path to delete - * @param recursive flag to request recursive deletes - * @throws IOException IO problems - */ - public void deleteChildren(String path, boolean recursive) throws IOException { - List<String> childNames = null; - try { - childNames = registryOperations.list(path); - } catch (PathNotFoundException e) { - return; - } - for (String childName : childNames) { - String child = join(path, childName); - registryOperations.delete(child, recursive); - } - } - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java deleted file mode 100644 index daaf0e9..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiConstants.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.util; - -public interface RestApiConstants { - String CONTEXT_ROOT = "/services/v1"; - String APPLICATIONS_API_RESOURCE_PATH = "/applications"; - String CONTAINERS_API_RESOURCE_PATH = "/containers"; - String SLIDER_APPMASTER_COMPONENT_NAME = "slider-appmaster"; - String SLIDER_CONFIG_SCHEMA = "http://example.org/specification/v2.0.0"; - String METAINFO_SCHEMA_VERSION = "2.1"; - String COMPONENT_TYPE_YARN_DOCKER = "yarn_docker"; - - String DEFAULT_START_CMD = "/bootstrap/privileged-centos6-sshd"; - String DEFAULT_COMPONENT_NAME = "default"; - String DEFAULT_IMAGE = "centos:centos6"; - String DEFAULT_NETWORK = "bridge"; - String DEFAULT_COMMAND_PATH = "/usr/bin/docker"; - String DEFAULT_USE_NETWORK_SCRIPT = "yes"; - - String PLACEHOLDER_APP_NAME = "${APP_NAME}"; - String PLACEHOLDER_APP_COMPONENT_NAME = "${APP_COMPONENT_NAME}"; - String PLACEHOLDER_COMPONENT_ID = "${COMPONENT_ID}"; - - String PROPERTY_REST_SERVICE_HOST = "REST_SERVICE_HOST"; - String PROPERTY_REST_SERVICE_PORT = "REST_SERVICE_PORT"; - String PROPERTY_APP_LIFETIME = "docker.lifetime"; - String PROPERTY_APP_RUNAS_USER = "APP_RUNAS_USER"; - Long DEFAULT_UNLIMITED_LIFETIME = -1l; - - Integer HTTP_STATUS_CODE_ACCEPTED = 202; - String ARTIFACT_TYPE_SLIDER_ZIP = "slider-zip"; - - Integer GET_APPLICATIONS_THREAD_POOL_SIZE = 200; - - String PROPERTY_PYTHON_PATH = "python.path"; - String PROPERTY_DNS_DEPENDENCY = "site.global.dns.dependency"; - - String COMMAND_ORDER_SUFFIX_START = "-START"; - String COMMAND_ORDER_SUFFIX_STARTED = "-RUNNING_BUT_UNREADY"; - String EXPORT_GROUP_NAME = "QuickLinks"; - - Integer ERROR_CODE_APP_DOES_NOT_EXIST = 404001; - Integer ERROR_CODE_APP_IS_NOT_RUNNING = 404002; - Integer ERROR_CODE_APP_SUBMITTED_BUT_NOT_RUNNING_YET = 404003; - Integer ERROR_CODE_APP_NAME_INVALID = 404004; - -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java deleted file mode 100644 index 74f7e06..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/java/org/apache/slider/util/RestApiErrorMessages.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.slider.util; - -public interface RestApiErrorMessages { - String ERROR_APPLICATION_NAME_INVALID = - "Application name is either empty or not provided"; - String ERROR_APPLICATION_NAME_INVALID_FORMAT = - "Application name %s is not valid - only lower case letters, digits, " + - "underscore and hyphen are allowed, and the name must be no more " + - "than 63 characters"; - String ERROR_COMPONENT_NAME_INVALID = - "Component name must be no more than %s characters: %s"; - String ERROR_USER_NAME_INVALID = - "User name must be no more than 63 characters"; - - String ERROR_APPLICATION_NOT_RUNNING = "Application not running"; - String ERROR_APPLICATION_DOES_NOT_EXIST = "Application not found"; - String ERROR_APPLICATION_IN_USE = "Application already exists in started" - + " state"; - String ERROR_APPLICATION_INSTANCE_EXISTS = "Application already exists in" - + " stopped/failed state (either restart with PUT or destroy with DELETE" - + " before creating a new one)"; - - String ERROR_SUFFIX_FOR_COMPONENT = - " for component %s (nor at the global level)"; - String ERROR_ARTIFACT_INVALID = "Artifact is not provided"; - String ERROR_ARTIFACT_FOR_COMP_INVALID = - ERROR_ARTIFACT_INVALID + ERROR_SUFFIX_FOR_COMPONENT; - String ERROR_ARTIFACT_ID_INVALID = - "Artifact id (like docker image name) is either empty or not provided"; - String ERROR_ARTIFACT_ID_FOR_COMP_INVALID = - ERROR_ARTIFACT_ID_INVALID + ERROR_SUFFIX_FOR_COMPONENT; - - String ERROR_RESOURCE_INVALID = "Resource is not provided"; - String ERROR_RESOURCE_FOR_COMP_INVALID = - ERROR_RESOURCE_INVALID + ERROR_SUFFIX_FOR_COMPONENT; - String ERROR_RESOURCE_MEMORY_INVALID = - "Application resource or memory not provided"; - String ERROR_RESOURCE_CPUS_INVALID = - "Application resource or cpus not provided"; - String ERROR_RESOURCE_CPUS_INVALID_RANGE = - "Unacceptable no of cpus specified, either zero or negative"; - String ERROR_RESOURCE_MEMORY_FOR_COMP_INVALID = - ERROR_RESOURCE_MEMORY_INVALID + ERROR_SUFFIX_FOR_COMPONENT; - String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID = - ERROR_RESOURCE_CPUS_INVALID + ERROR_SUFFIX_FOR_COMPONENT; - String ERROR_RESOURCE_CPUS_FOR_COMP_INVALID_RANGE = - ERROR_RESOURCE_CPUS_INVALID_RANGE - + " for component %s (or at the global level)"; - String ERROR_CONTAINERS_COUNT_INVALID = - "Invalid no of containers specified"; - String ERROR_CONTAINERS_COUNT_FOR_COMP_INVALID = - ERROR_CONTAINERS_COUNT_INVALID + ERROR_SUFFIX_FOR_COMPONENT; - String ERROR_DEPENDENCY_INVALID = "Dependency %s for component %s is " + - "invalid, does not exist as a component"; - String ERROR_DEPENDENCY_CYCLE = "Invalid dependencies, a cycle may " + - "exist: %s"; - - String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED = - "Cannot specify" + " cpus/memory along with profile"; - String ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_FOR_COMP_NOT_SUPPORTED = - ERROR_RESOURCE_PROFILE_MULTIPLE_VALUES_NOT_SUPPORTED - + " for component %s"; - String ERROR_RESOURCE_PROFILE_NOT_SUPPORTED_YET = - "Resource profile is not " + "supported yet. 
Please specify cpus/memory."; - - String ERROR_NULL_ARTIFACT_ID = - "Artifact Id can not be null if artifact type is none"; - String ERROR_ABSENT_NUM_OF_INSTANCE = - "Num of instances should appear either globally or per component"; - String ERROR_ABSENT_LAUNCH_COMMAND = - "Launch_command is required when type is not DOCKER"; - - String ERROR_QUICKLINKS_FOR_COMP_INVALID = "Quicklinks specified at" - + " component level, needs corresponding values set at application level"; -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto deleted file mode 100644 index 691f861..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterMessages.proto +++ /dev/null @@ -1,392 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -option java_package = "org.apache.slider.api.proto"; -option java_outer_classname = "Messages"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package org.apache.slider.api; - -//import "Security.proto"; - -/* - Look at SliderClusterProtocol.proto to see how to build this -*/ - -message RoleInstanceState { - required string name = 1; - optional string role = 2; - required uint32 state = 4; - required uint32 exitCode = 5; - optional string command = 6; - optional string diagnostics = 7; - repeated string output = 8; - repeated string environment = 9; - required uint32 roleId = 10; - required bool released = 11; - required int64 createTime = 12; - required int64 startTime = 13; - required string host = 14; - required string hostURL = 15; - optional string appVersion = 16; -} - -/** - * stop the cluster - */ -message StopClusterRequestProto { - /** - message to include - */ - required string message = 1; -} - -/** - * stop the cluster - */ -message StopClusterResponseProto { -} - -/** - * upgrade the containers - */ -message UpgradeContainersRequestProto { - /** - message to include - */ - required string message = 1; - repeated string container = 2; - repeated string component = 3; -} - -/** - * upgrade the containers - */ -message UpgradeContainersResponseProto { -} - -message FlexComponentsRequestProto { - repeated ComponentCountProto components = 1; -} - -message ComponentCountProto { - optional string name = 1; - optional int64 numberOfContainers = 2; -} - -message FlexComponentsResponseProto { -} - -/** - * void request - */ -message GetJSONClusterStatusRequestProto { -} - -/** - * response - */ -message GetJSONClusterStatusResponseProto { - required string clusterSpec = 1; -} - -/** - * list the nodes in a role - */ -message ListNodeUUIDsByRoleRequestProto { - required string role = 1; -} - -/** - * list the nodes in a role - */ -message ListNodeUUIDsByRoleResponseProto { - repeated string uuid = 1 ; -} - -/** - * get a node - */ -message GetNodeRequestProto { - required string uuid = 1; -} - - -/** - * response on a node - */ -message GetNodeResponseProto { - required RoleInstanceState clusterNode = 1 ; -} - -/** - * list the nodes for the UUDs - */ -message GetClusterNodesRequestProto { - repeated string uuid = 1 ; -} - -/** - * list the nodes in a role - */ -message GetClusterNodesResponseProto { - repeated RoleInstanceState clusterNode = 1 ; -} - -/** - * Echo - */ -message EchoRequestProto { - required string text = 1; -} - -/** - * Echo reply - */ -message EchoResponseProto { - required string text = 1; -} - - -/** - * Kill a container - */ -message KillContainerRequestProto { - required string id = 1; -} - -/** - * Kill reply - */ -message KillContainerResponseProto { - required bool success = 1; -} - -/** - * AM suicide - */ -message AMSuicideRequestProto { - required string text = 1; - required int32 signal = 2; - required int32 delay = 3; -} - -/** - * AM suicide reply. For this to be returned implies - * a failure of the AM to kill itself - */ -message AMSuicideResponseProto { - -} - - -/** - * Ask for the instance definition details - */ -message GetInstanceDefinitionRequestProto { - -} - -/** - * Get the definition back as three separate JSON strings - */ -message GetInstanceDefinitionResponseProto { - required string internal = 1; - required string resources = 2; - required string application = 3; -} - - - /* ************************************************************************ - - REST model and operations. 
- Below here the operations and payloads designed to mimic - the REST API. That API is now the source of those - specificatations; this is simply a derivative. - - **************************************************************************/ - -/** - * See org.apache.slider.api.types.ApplicationLivenessInformation - */ -message ApplicationLivenessInformationProto { - optional bool allRequestsSatisfied = 1; - optional int32 requestsOutstanding = 2; -} - -/* - * see org.apache.slider.api.types.ComponentInformation - */ -message ComponentInformationProto { - optional string name = 1; - optional int32 priority = 2; - optional int32 desired = 3; - optional int32 actual = 4; - optional int32 releasing = 5; - optional int32 requested = 6; - optional int32 failed = 7; - optional int32 started = 8; - optional int32 startFailed = 9; - optional int32 completed = 10; - optional int32 totalRequested = 11; - optional string failureMessage =12; - optional int32 placementPolicy =13; - repeated string containers = 14; - optional int32 failedRecently = 15; - optional int32 nodeFailed = 16; - optional int32 preempted = 17; - optional int32 pendingAntiAffineRequestCount = 18; - optional bool isAARequestOutstanding = 19; -} - -/* - * see org.apache.slider.api.types.ContainerInformation - */ -message ContainerInformationProto { - optional string containerId = 1; - optional string component = 2; - optional bool released = 3; - optional int32 state = 4; - optional int32 exitCode = 5; - optional string diagnostics = 6; - optional int64 createTime = 7; - optional int64 startTime = 8; - repeated string output = 9; - optional string host = 10; - optional string hostURL = 11; - optional string placement = 12; - optional string appVersion = 13; -} - - -/* - * see org.apache.slider.api.types.PingInformation - */ -message PingInformationProto { - optional string text = 1; - optional string verb = 2; - optional string body = 3; - optional int64 time = 4; -} - -message NodeEntryInformationProto { - required int32 priority = 1; - required int32 requested = 2; - required int32 starting = 3; - required int32 startFailed = 4; - required int32 failed = 5; - required int32 failedRecently= 6; - required int32 preempted = 7; - required int32 live = 8; - required int32 releasing = 9; - required int64 lastUsed = 10; - required string name = 11; -} - -message NodeInformationProto { - required string hostname = 1; - required string state = 2; - required string httpAddress = 3; - required string rackName = 4; - required string labels = 5; - required string healthReport= 6; - required int64 lastUpdated = 7; - repeated NodeEntryInformationProto entries = 8; -} - -message GetModelRequestProto { -} - -message GetModelDesiredRequestProto { -} - -message GetModelDesiredAppconfRequestProto { -} - -message GetModelDesiredResourcesRequestProto { -} - -message GetModelResolvedAppconfRequestProto { -} - -message GetModelResolvedResourcesRequestProto { -} - -message GetModelLiveResourcesRequestProto { -} - -message GetLiveContainersRequestProto { -} - -message GetLiveContainersResponseProto { - repeated string names = 1; - repeated ContainerInformationProto containers = 2; -} - -message GetLiveContainerRequestProto { - required string containerId = 1; -} - - -message GetLiveComponentsRequestProto { -} - -message GetLiveComponentsResponseProto { - - repeated string names = 1; - repeated ComponentInformationProto components = 2; -} - -message GetLiveComponentRequestProto { - required string name = 1; -} - -message GetApplicationLivenessRequestProto { 
-} - -message EmptyPayloadProto { -} - -/** - Generic JSON, often containing data structures serialized as a string -*/ -message WrappedJsonProto { - required string json = 1; -} - -message GetCertificateStoreRequestProto { - optional string hostname = 1; - required string requesterId = 2; - required string password = 3; - required string type = 4; -} - -message GetCertificateStoreResponseProto { - required bytes store = 1; -} - -message GetLiveNodesRequestProto { -} - -message GetLiveNodesResponseProto { - repeated NodeInformationProto nodes = 1; -} - -message GetLiveNodeRequestProto { - required string name = 1; -} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto deleted file mode 100644 index 776ce28..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/proto/SliderClusterProtocol.proto +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -option java_package = "org.apache.slider.api.proto"; -option java_outer_classname = "SliderClusterAPI"; -option java_generic_services = true; -option java_generate_equals_and_hash = true; -package org.apache.slider.api; - -/* - -Compiling - -Maven: How to do it as part of the build - mvn install -DskipTests -Pcompile-protobuf - -How to do it so as to get error messages - -protoc --java_out=src/main/java \ - -Isrc/main/proto src/main/proto/SliderClusterMessages.proto \ - src/main/proto/SliderClusterProtocol.proto - -Once happy: commit the changes - -*/ - -//import "Security.proto"; -import "SliderClusterMessages.proto"; - - -/** - * Protocol used from between Slider Client and AM - */ -service SliderClusterProtocolPB { - - /** - * Stop the cluster - */ - - rpc stopCluster(StopClusterRequestProto) - returns(StopClusterResponseProto); - - /** - * Upgrade containers - */ - rpc upgradeContainers(UpgradeContainersRequestProto) - returns(UpgradeContainersResponseProto); - - rpc flexComponents(FlexComponentsRequestProto) returns (FlexComponentsResponseProto); - - /** - * Get the current cluster status - */ - rpc getJSONClusterStatus(GetJSONClusterStatusRequestProto) - returns(GetJSONClusterStatusResponseProto); - - /** - * List all running nodes in a role - */ - rpc listNodeUUIDsByRole(ListNodeUUIDsByRoleRequestProto) - returns(ListNodeUUIDsByRoleResponseProto); - - /** - * Get the details on a node - */ - rpc getNode(GetNodeRequestProto) - returns(GetNodeResponseProto); - - /** - * Get the - * details on a list of nodes. - * Unknown nodes are not returned - * <i>Important: the order of the results are undefined</i> - */ - rpc getClusterNodes(GetClusterNodesRequestProto) - returns(GetClusterNodesResponseProto); - - /** - * echo some text - */ - rpc echo(EchoRequestProto) - returns(EchoResponseProto); - - /** - * kill a container - */ - rpc killContainer(KillContainerRequestProto) - returns(KillContainerResponseProto); - - /** - * kill the AM - */ - rpc amSuicide(AMSuicideRequestProto) - returns(AMSuicideResponseProto); - - /* ************************************************************************ - - REST model and operations. - Below here the operations and payloads designed to mimic - the REST API. That API is now the source of those - specificatations; this is simply a derivative. 
- - **************************************************************************/ - - rpc getLivenessInformation(GetApplicationLivenessRequestProto) - returns(ApplicationLivenessInformationProto); - - rpc getLiveContainers(GetLiveContainersRequestProto) - returns(GetLiveContainersResponseProto); - - rpc getLiveContainer(GetLiveContainerRequestProto) - returns(ContainerInformationProto); - - rpc getLiveComponents(GetLiveComponentsRequestProto) - returns(GetLiveComponentsResponseProto); - - rpc getLiveComponent(GetLiveComponentRequestProto) - returns(ComponentInformationProto); - - rpc getLiveNodes(GetLiveNodesRequestProto) - returns(GetLiveNodesResponseProto); - - rpc getLiveNode(GetLiveNodeRequestProto) - returns(NodeInformationProto); -} http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo deleted file mode 100644 index 9e67c15..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ /dev/null @@ -1,15 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -org.apache.slider.server.appmaster.rpc.SliderRPCSecurityInfo http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties deleted file mode 100644 index 65a7ad0..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/log4j.properties +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -# This is a log4j config for slider - -log4j.rootLogger=INFO,stdout -log4j.threshhold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{3} (%F:%M(%L)) - %m%n - -log4j.appender.subprocess=org.apache.log4j.ConsoleAppender -log4j.appender.subprocess.layout=org.apache.log4j.PatternLayout -log4j.appender.subprocess.layout.ConversionPattern=%c{1}: %m%n - - -#at debug this provides details on what is going on -log4j.logger.org.apache.slider=DEBUG -#log4j.logger.org.apache.slider.exec.RunLongLivedApp=ERROR - -log4j.logger.org.apache.hadoop.security=DEBUG -log4j.logger.org.apache.hadoop.yarn.service.launcher=DEBUG -log4j.logger.org.apache.hadoop.yarn.service=DEBUG -log4j.logger.org.apache.hadoop.yarn.client=DEBUG -#crank back on some noise -log4j.logger.org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor=WARN -log4j.logger.org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl=WARN -log4j.logger.org.apache.hadoop.yarn.client.RMProxy=WARN - -# for test runs we don't care about native code -log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR -# HDFS is noise on tets -log4j.logger.org.apache.hadoop.hdfs.server.datanode=WARN -log4j.logger.org.apache.hadoop.hdfs.server.namenode=WARN -log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement=WARN -log4j.logger.org.apache.hadoop.hdfs=WARN - -log4j.logger.org.apache.zookeeper=WARN http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt deleted file mode 100644 index 79c1972..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/agent.txt +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -This is the conf directory for the python agent \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b3aff2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json ---------------------------------------------------------------------- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json deleted file mode 100644 index 197a046..0000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-slider/hadoop-yarn-slider-core/src/main/resources/org/apache/slider/providers/agent/conf/command.json +++ /dev/null @@ -1,168 +0,0 @@ -{ - "roleCommand": "START", - "clusterName": "c1", - "hostname": "c6402.ambari.apache.org", - "hostLevelParams": { - "java_home": "/usr/jdk64/jdk1.7.0_45" - }, - "commandType": "EXECUTION_COMMAND", - "roleParams": {}, - "serviceName": "HBASE", - "role": "HBASE_MASTER", - "commandParams": {}, - "taskId": 24, - "public_hostname": "c6402.ambari.apache.org", - "configurations": { - "hbase-log4j": { - "log4j.threshold": "ALL", - "log4j.rootLogger": "${hbase.root.logger}", - "log4j.logger.org.apache.zookeeper": "INFO", - "log4j.logger.org.apache.hadoop.hbase": "DEBUG", - "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher": "INFO", - "log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil": "INFO", - "log4j.category.SecurityLogger": "${hbase.security.logger}", - "log4j.appender.console": "org.apache.log4j.ConsoleAppender", - "log4j.appender.console.target": "System.err", - "log4j.appender.console.layout": "org.apache.log4j.PatternLayout", - "log4j.appender.console.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] %c{2}: %m%n", - "log4j.appender.RFAS": "org.apache.log4j.RollingFileAppender", - "log4j.appender.RFAS.layout": "org.apache.log4j.PatternLayout", - "log4j.appender.RFAS.layout.ConversionPattern": "%d{ISO8601} %p %c: %m%n", - "log4j.appender.RFAS.MaxFileSize": "${hbase.security.log.maxfilesize}", - "log4j.appender.RFAS.MaxBackupIndex": "${hbase.security.log.maxbackupindex}", - "log4j.appender.RFAS.File": "${hbase.log.dir}/${hbase.security.log.file}", - "log4j.appender.RFA": "org.apache.log4j.RollingFileAppender", - "log4j.appender.RFA.layout": "org.apache.log4j.PatternLayout", - "log4j.appender.RFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] %c{2}: %m%n", - "log4j.appender.RFA.MaxFileSize": "${hbase.log.maxfilesize}", - "log4j.appender.RFA.MaxBackupIndex": "${hbase.log.maxbackupindex}", - "log4j.appender.RFA.File": "${hbase.log.dir}/${hbase.log.file}", - "log4j.appender.NullAppender": "org.apache.log4j.varia.NullAppender", - "log4j.appender.DRFA": "org.apache.log4j.DailyRollingFileAppender", - "log4j.appender.DRFA.layout": "org.apache.log4j.PatternLayout", - 
"log4j.appender.DRFA.layout.ConversionPattern": "%d{ISO8601} %-5p [%t] %c{2}: %m%n", - "log4j.appender.DRFA.File": "${hbase.log.dir}/${hbase.log.file}", - "log4j.appender.DRFA.DatePattern": ".yyyy-MM-dd", - "log4j.additivity.SecurityLogger": "false", - "hbase.security.logger": "INFO,console", - "hbase.security.log.maxfilesize": "256MB", - "hbase.security.log.maxbackupindex": "20", - "hbase.security.log.file": "SecurityAuth.audit", - "hbase.root.logger": "INFO,console", - "hbase.log.maxfilesize": "256MB", - "hbase.log.maxbackupindex": "20", - "hbase.log.file": "hbase.log", - "hbase.log.dir": "." - }, - "global": { - "hbase_root": "/share/hbase/hbase-0.96.1-hadoop2", - "hbase_pid_dir": "/var/run/hbase", - "proxyuser_group": "users", - "syncLimit": "5", - "hbase_regionserver_heapsize": "1024m", - "rca_enabled": "false", - "tickTime": "2000", - "hbase_master_heapsize": "1024m", - "initLimit": "10", - "user_group": "hadoop", - "hbase_user": "hbase", - "hbase_log_dir": "/var/log/hbase" - }, - "hdfs-site": { - "dfs.namenode.checkpoint.period": "21600", - "dfs.namenode.avoid.write.stale.datanode": "true", - "dfs.namenode.checkpoint.txns": "1000000", - "dfs.block.access.token.enable": "true", - "dfs.support.append": "true", - "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}", - "dfs.cluster.administrators": " hdfs", - "dfs.replication": "3", - "ambari.dfs.datanode.http.port": "50075", - "dfs.datanode.balance.bandwidthPerSec": "6250000", - "dfs.namenode.safemode.threshold-pct": "1.0f", - "dfs.namenode.checkpoint.edits.dir": "${dfs.namenode.checkpoint.dir}", - "dfs.permissions.enabled": "true", - "dfs.client.read.shortcircuit": "true", - "dfs.namenode.https-address": "c6402.ambari.apache.org:50470", - "dfs.journalnode.edits.dir": "/grid/0/hdfs/journal", - "dfs.blocksize": "134217728", - "dfs.datanode.max.transfer.threads": "1024", - "dfs.datanode.du.reserved": "1073741824", - "dfs.webhdfs.enabled": "true", - "dfs.namenode.handler.count": "100", - "dfs.namenode.checkpoint.dir": "/hadoop/hdfs/namesecondary", - "fs.permissions.umask-mode": "022", - "dfs.datanode.http.address": "0.0.0.0:${ambari.dfs.datanode.http.port}", - "dfs.datanode.ipc.address": "0.0.0.0:8010", - "dfs.datanode.data.dir": "/hadoop/hdfs/data", - "dfs.namenode.http-address": "c6402.ambari.apache.org:50070", - "dfs.blockreport.initialDelay": "120", - "dfs.datanode.failed.volumes.tolerated": "0", - "dfs.namenode.accesstime.precision": "0", - "ambari.dfs.datanode.port": "50010", - "dfs.namenode.avoid.read.stale.datanode": "true", - "dfs.namenode.secondary.http-address": "c6402.ambari.apache.org:50090", - "dfs.namenode.stale.datanode.interval": "30000", - "dfs.heartbeat.interval": "3", - "dfs.client.read.shortcircuit.streams.cache.size": "4096", - "dfs.permissions.superusergroup": "hdfs", - "dfs.https.port": "50470", - "dfs.journalnode.http-address": "0.0.0.0:8480", - "dfs.domain.socket.path": "/var/lib/hadoop-hdfs/dn_socket", - "dfs.namenode.write.stale.datanode.ratio": "1.0f", - "dfs.hosts.exclude": "/etc/hadoop/conf/dfs.exclude", - "dfs.datanode.data.dir.perm": "750", - "dfs.namenode.name.dir.restore": "true", - "dfs.replication.max": "50", - "dfs.namenode.name.dir": "/hadoop/hdfs/namenode" - }, - "hbase-site": { - "hbase.hstore.flush.retries.number": "120", - "hbase.client.keyvalue.maxsize": "10485760", - "hbase.hstore.compactionThreshold": "3", - "hbase.rootdir": "hdfs://c6402.ambari.apache.org:8020/apps/hbase/data", - "hbase.stagingdir": "hdfs://c6402.ambari.apache.org:8020/apps/hbase/staging", - 
"hbase.regionserver.handler.count": "60", - "hbase.regionserver.global.memstore.lowerLimit": "0.38", - "hbase.hregion.memstore.block.multiplier": "2", - "hbase.hregion.memstore.flush.size": "134217728", - "hbase.superuser": "hbase", - "hbase.zookeeper.property.clientPort": "2181", - "hbase.regionserver.global.memstore.upperLimit": "0.4", - "zookeeper.session.timeout": "30000", - "hbase.tmp.dir": "/hadoop/hbase", - "hbase.hregion.max.filesize": "10737418240", - "hfile.block.cache.size": "0.40", - "hbase.security.authentication": "simple", - "hbase.defaults.for.version.skip": "true", - "hbase.zookeeper.quorum": "c6402.ambari.apache.org", - "zookeeper.znode.parent": "/hbase-unsecure", - "hbase.hstore.blockingStoreFiles": "10", - "hbase.hregion.majorcompaction": "86400000", - "hbase.security.authorization": "false", - "hbase.cluster.distributed": "true", - "hbase.hregion.memstore.mslab.enabled": "true", - "hbase.client.scanner.caching": "100", - "hbase.zookeeper.useMulti": "true", - "hbase.regionserver.info.port": "0", - "hbase.master.info.port": "60010" - }, - "core-site": { - "io.serializations": "org.apache.hadoop.io.serializer.WritableSerialization", - "gluster.daemon.user": "null", - "fs.trash.interval": "360", - "hadoop.security.authentication": "simple", - "io.compression.codecs": "org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec", - "mapreduce.jobtracker.webinterface.trusted": "false", - "fs.AbstractFileSystem.glusterfs.impl": "null", - "fs.defaultFS": "hdfs://c6402.ambari.apache.org:8020", - "ipc.client.connect.max.retries": "50", - "ipc.client.idlethreshold": "8000", - "io.file.buffer.size": "131072", - "hadoop.security.authorization": "false", - "hadoop.security.auth_to_local": "\n RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/\n RULE:[2:$1@$0](jhs@.*)s/.*/mapred/\n RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/\n RULE:[2:$1@$0](hm@.*)s/.*/hbase/\n RULE:[2:$1@$0](rs@.*)s/.*/hbase/\n DEFAULT", - "ipc.client.connection.maxidletime": "30000" - } - }, - "commandId": "2-2" -} \ No newline at end of file --------------------------------------------------------------------- To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org