This is an automated email from the ASF dual-hosted git repository.

casion pushed a commit to branch dev-1.9.0
in repository https://gitbox.apache.org/repos/asf/linkis.git


The following commit(s) were added to refs/heads/dev-1.9.0 by this push:
     new 2a3d1b1566 feat(ci): add support for spark and hive engine integration tests (#5287)
2a3d1b1566 is described below

commit 2a3d1b1566823f6669fb254684468ec33b3bf4f6
Author: Kazuto Iris <[email protected]>
AuthorDate: Fri Nov 21 10:59:24 2025 +0800

    feat(ci): add support for spark and hive engine integration tests (#5287)
---
 .github/workflows/integration-test.yml             |  15 +--
 linkis-dist/bin/install-linkis-to-kubernetes.sh    |   2 +
 .../ldh-with-mysql-jdbc.Dockerfile}                |  39 ++-----
 linkis-dist/docker/ldh.Dockerfile                  |   9 +-
 .../scripts/make-ldh-image-with-mysql-jdbc.sh      |  51 +++++++++
 linkis-dist/helm/charts/linkis/templates/jobs.yaml |  11 +-
 linkis-dist/helm/scripts/install-ldh.sh            |   5 +-
 linkis-dist/helm/scripts/prepare-for-spark.sh      |  25 ++---
 .../resources/ldh/configmaps/configmap-hadoop.yaml | 125 ++++++++++++++++++++-
 9 files changed, 226 insertions(+), 56 deletions(-)

diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml
index f99f8c30e6..e5af9803c3 100644
--- a/.github/workflows/integration-test.yml
+++ b/.github/workflows/integration-test.yml
@@ -152,6 +152,8 @@ jobs:
             sleep 20
           done
 
+          bash ./linkis-dist/helm/scripts/prepare-for-spark.sh
+
           #show linkis pod logs
           #POD_NAME=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-linkismanager -o jsonpath='{.items[0].metadata.name}'`
           #kubectl logs -n linkis  ${POD_NAME} -f --tail=10000
@@ -170,16 +172,15 @@ jobs:
 
           # Execute test by linkis-cli
           POD_NAME=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-mg-gateway -o jsonpath='{.items[0].metadata.name}'`
-          kubectl exec -it -n linkis  ${POD_NAME} -- bash -c " \
+          kubectl exec -n linkis  ${POD_NAME} -- bash -c " \
           sh /opt/linkis/bin/linkis-cli -engineType shell-1 -codeType shell -code \"pwd\" ";
 
-          kubectl exec -it -n linkis  ${POD_NAME} -- bash -c " \
+          kubectl exec -n linkis  ${POD_NAME} -- bash -c " \
           sh /opt/linkis/bin/linkis-cli -engineType python-python2 -codeType python -code   'print(\"hello\")' "
 
-          #todo
-          #kubectl exec -it -n linkis  ${POD_NAME} -- bash -c " \
-          #sh /opt/linkis/bin/linkis-cli -engineType hive-3.1.3 -codeType hql -code   'show databases' "
+          kubectl exec -n linkis  ${POD_NAME} -- bash -c " \
+          sh /opt/linkis/bin/linkis-cli -engineType hive-3.1.3 -codeType hql -code   'show databases' "
 
-          #kubectl exec -it -n linkis  ${POD_NAME} -- bash -c " \
-          #sh /opt/linkis/bin/linkis-cli -engineType spark-3.2.1 -codeType sql -code   'show databases' "
+          kubectl exec -n linkis  ${POD_NAME} -- bash -c " \
+          sh /opt/linkis/bin/linkis-cli -engineType spark-3.2.1 -codeType sql -code   'show databases' "
         shell: bash
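
Note: the -it flags were dropped from the kubectl exec calls above because CI
runners attach no TTY, so an interactive exec would fail there. As a local
sketch, the new hive check can be reproduced against a kind cluster with the
linkis-demo release installed (namespace and pod label are taken from the
workflow; everything else is illustrative):

    POD_NAME=$(kubectl get pods -n linkis \
      -l app.kubernetes.io/instance=linkis-demo-mg-gateway \
      -o jsonpath='{.items[0].metadata.name}')
    # no -it: there is no terminal to attach to in CI
    kubectl exec -n linkis ${POD_NAME} -- bash -c \
      "sh /opt/linkis/bin/linkis-cli -engineType hive-3.1.3 -codeType hql -code 'show databases'"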
diff --git a/linkis-dist/bin/install-linkis-to-kubernetes.sh b/linkis-dist/bin/install-linkis-to-kubernetes.sh
index 00681b27b9..a10519342b 100644
--- a/linkis-dist/bin/install-linkis-to-kubernetes.sh
+++ b/linkis-dist/bin/install-linkis-to-kubernetes.sh
@@ -86,6 +86,8 @@ tag(){
 make_linkis_image_with_mysql_jdbc(){
     ${ROOT_DIR}/docker/scripts/make-linkis-image-with-mysql-jdbc.sh
     docker tag linkis:with-jdbc linkis:dev
+    ${ROOT_DIR}/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
+    docker tag linkis-ldh:with-jdbc linkis-ldh:dev
 }
 #creating a kind cluster
 create_kind_cluster(){
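
A quick, purely illustrative way to confirm that both the Linkis and LDH
images were built and re-tagged after this step (image names and tags as used
in the script above):

    docker images | grep -E 'linkis(-ldh)?'   # expect dev and with-jdbc tags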
diff --git a/linkis-dist/helm/scripts/install-ldh.sh b/linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile
old mode 100755
new mode 100644
similarity index 50%
copy from linkis-dist/helm/scripts/install-ldh.sh
copy to linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile
index 3ada87befd..515ce3e32c
--- a/linkis-dist/helm/scripts/install-ldh.sh
+++ b/linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile
@@ -1,4 +1,3 @@
-#!/usr/bin/env bash
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements.  See the NOTICE file distributed with
@@ -6,39 +5,25 @@
 # The ASF licenses this file to You under the Apache License, Version 2.0
 # (the "License"); you may not use this file except in compliance with
 # the License.  You may obtain a copy of the License at
-# http://www.apache.org/licenses/LICENSE-2.0
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-#
-
-WORK_DIR=`cd $(dirname $0); pwd -P`
-
-. ${WORK_DIR}/common.sh
-
-set -e
 
-USING_KIND=${1:-false}
-LDH_VERSION=${LDH_VERSION-${LINKIS_IMAGE_TAG}}
-echo "# LDH version: ${LINKIS_IMAGE_TAG}"
+ARG LINKIS_IMAGE=linkis-ldh:dev
 
-# load image
-if [[ "X${USING_KIND}" == "Xtrue" ]]; then
-  echo "# Loading LDH image ..."
-  kind load docker-image linkis-ldh:${LINKIS_IMAGE_TAG} --name ${KIND_CLUSTER_NAME}
-fi
+######################################################################
+# linkis-ldh image with mysql jdbc
+######################################################################
+FROM ${LINKIS_IMAGE}
 
-# deploy LDH
-echo "# Deploying LDH ..."
-set +e
-x=`kubectl get ns ldh 2> /dev/null`
-set -e
-if [[ "X${x}" == "X" ]]; then
-  kubectl create ns ldh
-fi
-kubectl apply -n ldh -f ${RESOURCE_DIR}/ldh/configmaps
+ARG LDH_HOME=/opt/ldh/current
+ARG MYSQL_JDBC_VERSION=8.0.28
 
-LDH_VERSION=${LDH_VERSION} envsubst < ${RESOURCE_DIR}/ldh/ldh.yaml | kubectl apply -n ldh -f -
+COPY mysql-connector-java-${MYSQL_JDBC_VERSION}.jar ${LDH_HOME}/hive/lib/
+COPY mysql-connector-java-${MYSQL_JDBC_VERSION}.jar ${LDH_HOME}/spark/lib/
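
The new Dockerfile can also be built by hand; a sketch, assuming the connector
jar has already been downloaded into the build context directory (the helper
script added below automates exactly this):

    docker build -f linkis-dist/docker/ldh-with-mysql-jdbc.Dockerfile \
      --build-arg LINKIS_IMAGE=linkis-ldh:dev \
      --build-arg MYSQL_JDBC_VERSION=8.0.28 \
      -t linkis-ldh:with-jdbc /path/to/build-context   # must contain the jar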
diff --git a/linkis-dist/docker/ldh.Dockerfile b/linkis-dist/docker/ldh.Dockerfile
index 8a1d64abce..5c4683ddb6 100644
--- a/linkis-dist/docker/ldh.Dockerfile
+++ b/linkis-dist/docker/ldh.Dockerfile
@@ -75,6 +75,10 @@ ADD ldh-tars/spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION}.tgz /opt/l
 ADD ldh-tars/flink-${FLINK_VERSION}-bin-scala_2.11.tgz /opt/ldh/${LINKIS_VERSION}/
 ADD ldh-tars/apache-zookeeper-${ZOOKEEPER_VERSION}-bin.tar.gz /opt/ldh/${LINKIS_VERSION}/
 
+RUN ln -s /opt/ldh/${LINKIS_VERSION}/spark-${SPARK_VERSION}-bin-hadoop${SPARK_HADOOP_VERSION} /opt/ldh/current/spark \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/hadoop-${HADOOP_VERSION} /opt/ldh/current/hadoop \
+    && ln -s /opt/ldh/${LINKIS_VERSION}/apache-hive-${HIVE_VERSION}-bin /opt/ldh/current/hive
+
 RUN mkdir -p /etc/ldh \
     && mkdir -p /var/log/hadoop && chmod 777 -R /var/log/hadoop \
     && mkdir -p /var/log/hive && chmod 777 -R /var/log/hive \
@@ -91,9 +95,10 @@ RUN mkdir -p /etc/ldh \
 #ADD ldh-tars/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar /opt/ldh/current/hive/lib/
 #ADD ldh-tars/mysql-connector-java-${MYSQL_JDBC_VERSION}.jar /opt/ldh/current/spark/jars/
 
-ENV JAVA_HOME /etc/alternatives/jre
-ENV PATH /opt/ldh/current/hadoop/bin:/opt/ldh/current/hive/bin:/opt/ldh/current/spark/bin:/opt/ldh/current/flink/bin:/opt/ldh/current/zookeeper/bin:$PATH
+ENV JAVA_HOME=/etc/alternatives/jre
+ENV PATH=/opt/ldh/current/hadoop/bin:/opt/ldh/current/hive/bin:/opt/ldh/current/spark/bin:/opt/ldh/current/flink/bin:/opt/ldh/current/zookeeper/bin:$PATH
 ENV HADOOP_CONF_DIR=/etc/ldh/hadoop
+ENV YARN_CONF_DIR=/etc/ldh/hadoop
 ENV HIVE_CONF_DIR=/etc/ldh/hive
 ENV SPARK_CONF_DIR=/etc/ldh/spark
 ENV FLINK_CONF_DIR=/etc/ldh/flink
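
An illustrative smoke test for the new symlinks and environment variables
(the linkis-ldh:dev tag and the presence of bash in the image are assumptions
here):

    docker run --rm linkis-ldh:dev bash -c \
      'ls -l /opt/ldh/current && echo ${YARN_CONF_DIR} && which spark-submit'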
diff --git a/linkis-dist/docker/scripts/make-ldh-image-with-mysql-jdbc.sh b/linkis-dist/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
new file mode 100755
index 0000000000..6b91cddf20
--- /dev/null
+++ b/linkis-dist/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+
+WORK_DIR=`cd $(dirname $0); pwd -P`
+
+. ${WORK_DIR}/utils.sh
+
+IMAGE_NAME=${IMAGE_NAME:-linkis-ldh:with-jdbc}
+LINKIS_IMAGE=${LINKIS_IMAGE:-linkis-ldh:dev}
+LINKIS_HOME=${LINKIS_HOME:-/opt/ldh/current}
+MYSQL_JDBC_VERSION=${MYSQL_JDBC_VERSION:-8.0.28}
+MYSQL_JDBC_FILENAME=mysql-connector-java-${MYSQL_JDBC_VERSION}.jar
+MYSQL_JDBC_URL="https://repo1.maven.org/maven2/mysql/mysql-connector-java/${MYSQL_JDBC_VERSION}/${MYSQL_JDBC_FILENAME}";
+
+BUILD_DIR=`mktemp -d -t linkis-build-XXXXX`
+
+echo "#          build dir: ${BUILD_DIR}"
+echo "#         base image: ${LINKIS_IMAGE}"
+echo "# mysql jdbc version: ${MYSQL_JDBC_VERSION}"
+
+download ${MYSQL_JDBC_URL} ${MYSQL_JDBC_FILENAME} ${BUILD_DIR}
+
+echo "try to exec: docker build -f 
${WORK_DIR}/../ldh-with-mysql-jdbc.Dockerfile \
+  -t ${IMAGE_NAME} \
+  --build-arg LINKIS_IMAGE=${LINKIS_IMAGE} \
+  --build-arg LINKIS_HOME=${LINKIS_HOME} \
+  --build-arg MYSQL_JDBC_VERSION=${MYSQL_JDBC_VERSION} \
+  ${BUILD_DIR}"
+
+docker build -f ${WORK_DIR}/../ldh-with-mysql-jdbc.Dockerfile \
+  -t ${IMAGE_NAME} \
+  --build-arg LINKIS_IMAGE=${LINKIS_IMAGE} \
+  --build-arg LINKIS_HOME=${LINKIS_HOME} \
+  --build-arg MYSQL_JDBC_VERSION=${MYSQL_JDBC_VERSION} \
+  ${BUILD_DIR}
+
+echo "# done, image: ${IMAGE_NAME}"
diff --git a/linkis-dist/helm/charts/linkis/templates/jobs.yaml b/linkis-dist/helm/charts/linkis/templates/jobs.yaml
index 38d97ee2e2..5daebb04ad 100644
--- a/linkis-dist/helm/charts/linkis/templates/jobs.yaml
+++ b/linkis-dist/helm/charts/linkis/templates/jobs.yaml
@@ -28,7 +28,16 @@ spec:
           command:
             - /bin/bash
             - -ecx
-            - >-
+            - |-
+              sed -i 's#@YARN_RESTFUL_URL#{{ .Values.linkis.deps.yarn.restfulUrl }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@HADOOP_VERSION#{{ .Values.linkis.deps.hadoop.version }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_AUTH_ENABLE#{{ .Values.linkis.deps.yarn.authEnable }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_AUTH_USER#{{ .Values.linkis.deps.yarn.authUser }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_AUTH_PWD#{{ .Values.linkis.deps.yarn.authPassword }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_KERBEROS_ENABLE#{{ .Values.linkis.deps.yarn.kerberosEnable }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_PRINCIPAL_NAME#{{ .Values.linkis.deps.yarn.principal }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_KEYTAB_PATH#{{ .Values.linkis.deps.yarn.keytab }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
+              sed -i 's#@YARN_KRB5_PATH#{{ .Values.linkis.deps.yarn.krb5 }}#g' {{ .Values.linkis.locations.homeDir }}/db/linkis_dml.sql
               mysql -h{{ .Values.linkis.datasource.host }} -P{{ .Values.linkis.datasource.port }} -u{{ .Values.linkis.datasource.username }} -p{{ .Values.linkis.datasource.password }} --default-character-set=utf8 -e "CREATE DATABASE IF NOT EXISTS {{ .Values.linkis.datasource.database }} DEFAULT CHARSET utf8 COLLATE utf8_general_ci";
               mysql -h{{ .Values.linkis.datasource.host }} -P{{ .Values.linkis.datasource.port }} -u{{ .Values.linkis.datasource.username }} -p{{ .Values.linkis.datasource.password }} -D{{ .Values.linkis.datasource.database }}  --default-character-set=utf8 -e "source {{ .Values.linkis.locations.homeDir }}/db//linkis_ddl.sql";
               mysql -h{{ .Values.linkis.datasource.host }} -P{{ .Values.linkis.datasource.port }} -u{{ .Values.linkis.datasource.username }} -p{{ .Values.linkis.datasource.password }} -D{{ .Values.linkis.datasource.database }}  --default-character-set=utf8 -e "source {{ .Values.linkis.locations.homeDir }}/db//linkis_dml.sql"
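
The switch from the folded scalar ">-" to the literal scalar "|-" matters
here: ">-" would fold the newly added sed commands onto a single line, while
"|-" preserves one command per line for "bash -ecx". For illustration, the
first sed line renders roughly as below, assuming homeDir is /opt/linkis and
restfulUrl is http://ldh.ldh.svc.cluster.local:8088 (both values are
placeholders for whatever values.yaml provides):

    sed -i 's#@YARN_RESTFUL_URL#http://ldh.ldh.svc.cluster.local:8088#g' /opt/linkis/db/linkis_dml.sql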
diff --git a/linkis-dist/helm/scripts/install-ldh.sh b/linkis-dist/helm/scripts/install-ldh.sh
index 3ada87befd..74d1960f5f 100755
--- a/linkis-dist/helm/scripts/install-ldh.sh
+++ b/linkis-dist/helm/scripts/install-ldh.sh
@@ -16,7 +16,7 @@
 #
 
 WORK_DIR=`cd $(dirname $0); pwd -P`
-
+ROOT_DIR=${WORK_DIR}/../..
 . ${WORK_DIR}/common.sh
 
 set -e
@@ -27,6 +27,9 @@ echo "# LDH version: ${LINKIS_IMAGE_TAG}"
 
 # load image
 if [[ "X${USING_KIND}" == "Xtrue" ]]; then
+  echo "# Preparing LDH image ..."
+  ${ROOT_DIR}/docker/scripts/make-ldh-image-with-mysql-jdbc.sh
+  docker tag linkis-ldh:with-jdbc linkis-ldh:dev
   echo "# Loading LDH image ..."
   kind load docker-image linkis-ldh:${LINKIS_IMAGE_TAG} --name ${KIND_CLUSTER_NAME}
 fi
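
Invocation sketch (per the script, the positional argument selects kind mode,
which now also builds and tags the with-jdbc LDH image before loading it):

    ./linkis-dist/helm/scripts/install-ldh.sh true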
diff --git a/linkis-dist/helm/scripts/prepare-for-spark.sh b/linkis-dist/helm/scripts/prepare-for-spark.sh
index 5b2b35a824..8519e0bdee 100644
--- a/linkis-dist/helm/scripts/prepare-for-spark.sh
+++ b/linkis-dist/helm/scripts/prepare-for-spark.sh
@@ -21,22 +21,16 @@ WORK_DIR=`cd $(dirname $0); pwd -P`
 
 ## copy spark resource from ldh to linkis-cg-engineconnmanager
 
-LDH_POD_NAME=`kubectl get pods -n ldh -l app=ldh    -o jsonpath='{.items[0].metadata.name}'`
-kubectl cp -n ldh ${LDH_POD_NAME}:/opt/ldh/ ./ldh
-
+LDH_POD_NAME=`kubectl get pods -n ldh -l app=ldh -o jsonpath='{.items[0].metadata.name}'`
 ECM_POD_NAME=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-engineconnmanager -o jsonpath='{.items[0].metadata.name}'`
-kubectl cp ./ldh  -n linkis ${ECM_POD_NAME}:/opt/ ;
-
-
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "chmod +x /opt/ldh/1.3.0/spark-3.2.1-bin-hadoop3.2/bin/*"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "ln -s /opt/ldh/1.3.0/spark-3.2.1-bin-hadoop3.2 /opt/ldh/current/spark"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "ln -s /opt/ldh/1.3.0/hadoop-3.3.4 /opt/ldh/current/hadoop"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "ln -s /opt/ldh/1.3.0/apache-hive-3.1.3-bin /opt/ldh/current/hive"
 
+kubectl exec -n ldh ${LDH_POD_NAME} -- tar -C /opt -cf - ldh | \
+kubectl exec -i -n linkis ${ECM_POD_NAME} -- tar -C /opt -xf - --no-same-owner
 
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export SPARK_HOME=/opt/ldh/current/spark' |sudo tee --append /etc/profile"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export PATH=\$SPARK_HOME/bin:\$PATH' |sudo tee --append  /etc/profile"
-kubectl exec -it -n linkis ${ECM_POD_NAME} -- bash -c "source /etc/profile"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "sudo mkdir -p /appcom/Install && sudo chmod 0777 /appcom/Install && ln -s /opt/ldh/current/spark /appcom/Install/spark"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export SPARK_HOME=/opt/ldh/current/spark' |sudo tee --append /etc/profile"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "echo 'export PATH=\$SPARK_HOME/bin:\$PATH' |sudo tee --append  /etc/profile"
+kubectl exec -n linkis ${ECM_POD_NAME} -- bash -c "source /etc/profile"
 
 # add ecm dns for ldh pod
 ECM_POD_IP=`kubectl get pods -n linkis -l app.kubernetes.io/instance=linkis-demo-cg-engineconnmanager -o jsonpath='{.items[0].status.podIP}'`
@@ -45,7 +39,4 @@ ECM_POD_SUBDOMAIN=`kubectl get pods -n linkis -l app.kubernetes.io/instance=link
 
 ECM_DNS="${ECM_POD_IP}   
${ECM_POD_NAME}.${ECM_POD_SUBDOMAIN}.linkis.svc.cluster.local"
 
-kubectl exec -it -n ldh ${LDH_POD_NAME} -- bash -c "echo ${ECM_DNS} |sudo tee --append  /etc/hosts"
-
-
-rm -rf ldh;
\ No newline at end of file
+kubectl exec -n ldh ${LDH_POD_NAME} -- bash -c "echo ${ECM_DNS} |sudo tee --append  /etc/hosts"
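
The tar pipeline above replaces the old two-hop copy (kubectl cp from the LDH
pod to a local ./ldh directory, then kubectl cp into the ECM pod) with a
direct pod-to-pod stream; --no-same-owner keeps extraction from failing on
chown when the target user is not root. The generic pattern, with placeholder
names:

    kubectl exec -n <src-ns> <src-pod> -- tar -C /src-dir -cf - some-dir | \
      kubectl exec -i -n <dst-ns> <dst-pod> -- tar -C /dst-dir -xf - --no-same-owner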
diff --git a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
index fa74a304c9..904e88bc3f 100644
--- a/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
+++ b/linkis-dist/helm/scripts/resources/ldh/configmaps/configmap-hadoop.yaml
@@ -591,4 +591,127 @@ data:
           <aclAdministerApps>*</aclAdministerApps>
         </queue>
       </queue>
-    </allocations>
\ No newline at end of file
+    </allocations>
+  capacity-scheduler.xml: |
+    <!--
+      Licensed under the Apache License, Version 2.0 (the "License");
+      you may not use this file except in compliance with the License.
+      You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License. See accompanying LICENSE file.
+    -->
+    <configuration>
+
+      <property>
+        <name>yarn.scheduler.capacity.maximum-applications</name>
+        <value>4</value>
+        <description>
+          Maximum number of applications that can be pending and running.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+        <value>0.5</value>
+        <description>
+          Maximum percent of resources in the cluster which can be used to run
+          application masters i.e. controls number of concurrent running
+          applications.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.resource-calculator</name>
+        <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
+        <description>
+          The ResourceCalculator implementation to be used to compare
+          Resources in the scheduler.
+          The default i.e. DefaultResourceCalculator only uses Memory while
+          DominantResourceCalculator uses dominant-resource to compare
+          multi-dimensional resources such as Memory, CPU etc.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.queues</name>
+        <value>default</value>
+        <description>
+          The queues at the this level (root is the root queue).
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.capacity</name>
+        <value>100</value>
+        <description>Default queue target capacity.</description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+        <value>1</value>
+        <description>
+          Default queue user limit a percentage from 0.0 to 1.0.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+        <value>100</value>
+        <description>
+          The maximum capacity of the default queue.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.state</name>
+        <value>RUNNING</value>
+        <description>
+          The state of the default queue. State can be one of RUNNING or STOPPED.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.acl_submit_applications</name>
+        <value>*</value>
+        <description>
+          The ACL of who can submit jobs to the default queue.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.root.default.acl_administer_queue</name>
+        <value>*</value>
+        <description>
+          The ACL of who can administer jobs on the default queue.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.node-locality-delay</name>
+        <value>40</value>
+        <description>
+          Number of missed scheduling opportunities after which the CapacityScheduler
+          attempts to schedule rack-local containers.
+          Typically this should be set to number of nodes in the cluster, By default is setting
+          approximately number of nodes in one rack which is 40.
+        </description>
+      </property>
+
+      <property>
+        <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
+        <value>false</value>
+        <description>
+          If a queue mapping is present, will it override the value specified
+          by the user? This can be used by administrators to place jobs in queues
+          that are different than the one specified by the user.
+          The default is false.
+        </description>
+      </property>
+
+    </configuration>
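
One way to sanity-check that the new capacity-scheduler settings took effect
is YARN's scheduler REST endpoint, queried from inside the LDH pod (the label
selector matches the one used in prepare-for-spark.sh; curl in the image and
the ResourceManager's default 8088 webapp port are assumptions):

    LDH_POD_NAME=$(kubectl get pods -n ldh -l app=ldh -o jsonpath='{.items[0].metadata.name}')
    kubectl exec -n ldh ${LDH_POD_NAME} -- curl -s http://localhost:8088/ws/v1/cluster/scheduler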


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
