wbo4958 commented on code in PR #43494:
URL: https://github.com/apache/spark/pull/43494#discussion_r1369478240


##########
core/src/main/scala/org/apache/spark/scheduler/ExecutorResourcesAmounts.scala:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.scheduler
+
+import scala.collection.mutable.HashMap
+
+import org.apache.spark.SparkException
+import org.apache.spark.resource.{ResourceInformation, ResourceProfile}
+import org.apache.spark.resource.ResourceAmountUtils.RESOURCE_TOTAL_AMOUNT
+
+/**
+ * Class to hold information about a series of resources belonging to an executor.
+ * A resource could be a GPU, an FPGA, etc. It is used as a temporary class
+ * to calculate the resource amounts when offering resources to the tasks
+ * in the [[TaskSchedulerImpl]].
+ *
+ * One example is GPUs, where the addresses would be the indices of the GPUs.
+ *
+ * @param resources The executor's available resources and amounts, e.g.,
+ *                  Map("gpu" -> Map("0" -> 0.2, "1" -> 1.0),
+ *                  "fpga" -> Map("a" -> 0.3, "b" -> 0.9))
+ */
+private[spark] class ExecutorResourcesAmounts(
+    private val resources: Map[String, Map[String, Double]]) extends Serializable {
+
+  resources.foreach { case (_, addressAmounts) =>
+    addressAmounts.foreach { case (_, amount) => assert(amount <= 1.0) }
+  }
+
+  // Multiply by RESOURCE_TOTAL_AMOUNT to avoid operating on doubles directly,
+  // and convert the address amounts to mutable.HashMap.
+  private val internalResources: Map[String, HashMap[String, Long]] = {
+    resources.map { case (rName, addressAmounts) =>
+      rName -> HashMap(addressAmounts.map { case (address, amount) =>
+        address -> (amount * RESOURCE_TOTAL_AMOUNT).toLong
+      }.toSeq: _*)
+    }
+  }
+
+  // Maps from a resource name to the number of its addresses.
+  lazy val resourceAmount: Map[String, Int] = internalResources.map { case (rName, addressMap) =>
+    rName -> addressMap.size
+  }
+
+  // Converts the internal long-based resources back to the public double-based view.
+  def availableResources: Map[String, Map[String, Double]] = {
+    internalResources.map { case (rName, addressMap) =>
+      rName -> addressMap.map { case (address, amount) =>
+        address -> amount.toDouble / RESOURCE_TOTAL_AMOUNT
+      }.toMap
+    }
+  }
+
+  // Acquires the assigned resources and updates the internal bookkeeping.
+  def acquire(assignedResource: Map[String, Map[String, Double]]): Unit = {
+    assignedResource.foreach { case (rName, taskResAmounts) =>
+      val availableResourceAmounts = internalResources.getOrElse(rName,
+        throw new SparkException(s"Try to acquire an address from $rName that doesn't exist"))
+      taskResAmounts.foreach { case (address, amount) =>
+        val prevInternalTotalAmount = availableResourceAmounts.getOrElse(address,
+          throw new SparkException(s"Try to acquire an address that doesn't exist. $rName " +
+            s"address $address doesn't exist."))
+
+        val internalTaskAmount = (amount * RESOURCE_TOTAL_AMOUNT).toLong
+        val internalLeft = prevInternalTotalAmount - internalTaskAmount
+        val realLeft = internalLeft.toDouble / RESOURCE_TOTAL_AMOUNT
+        if (realLeft < 0) {
+          throw new SparkException(s"The remaining amount ${realLeft} " +
+            s"after acquiring $rName address $address should be >= 0")
+        }
+        internalResources(rName)(address) = internalLeft
+        // scalastyle:off println

Review Comment:
   I will remove the debug log in the next commit
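
   For reference, a minimal standalone sketch of the fixed-point round-trip this class relies on. The object name and the RESOURCE_TOTAL_AMOUNT = 10000 value here are illustrative assumptions, not taken from this diff:

   ```scala
   // Hypothetical sketch; RESOURCE_TOTAL_AMOUNT = 10000 is an assumed value,
   // not confirmed by this diff.
   object FixedPointSketch {
     val RESOURCE_TOTAL_AMOUNT: Long = 10000L

     // Convert a fractional amount to the internal Long representation.
     def toInternal(amount: Double): Long = (amount * RESOURCE_TOTAL_AMOUNT).toLong

     // Convert the internal Long representation back to a Double.
     def toPublic(internal: Long): Double = internal.toDouble / RESOURCE_TOTAL_AMOUNT

     def main(args: Array[String]): Unit = {
       // With raw doubles, 1.0 - 0.1 - 0.2 == 0.7000000000000001;
       // doing the subtraction in the Long domain keeps it exact.
       val left = toInternal(1.0) - toInternal(0.1) - toInternal(0.2)
       println(toPublic(left)) // prints 0.7
     }
   }
   ```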



##########
core/src/main/scala/org/apache/spark/scheduler/ExecutorResourcesAmounts.scala:
##########
@@ -0,0 +1,199 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.scheduler
+
+import scala.collection.mutable.HashMap
+
+import org.apache.spark.SparkException
+import org.apache.spark.resource.{ResourceInformation, ResourceProfile}
+import org.apache.spark.resource.ResourceAmountUtils.RESOURCE_TOTAL_AMOUNT
+
+/**
+ * Class to hold information about a series of resources belonging to an executor.
+ * A resource could be a GPU, an FPGA, etc. It is used as a temporary class
+ * to calculate the resource amounts when offering resources to the tasks
+ * in the [[TaskSchedulerImpl]].
+ *
+ * One example is GPUs, where the addresses would be the indices of the GPUs.
+ *
+ * @param resources The executor's available resources and amounts, e.g.,
+ *                  Map("gpu" -> Map("0" -> 0.2, "1" -> 1.0),
+ *                  "fpga" -> Map("a" -> 0.3, "b" -> 0.9))
+ */
+private[spark] class ExecutorResourcesAmounts(
+    private val resources: Map[String, Map[String, Double]]) extends Serializable {
+
+  resources.foreach { case (_, addressAmounts) =>
+    addressAmounts.foreach { case (_, amount) => assert(amount <= 1.0) }
+  }
+
+  // Multiply by RESOURCE_TOTAL_AMOUNT to avoid operating on doubles directly,
+  // and convert the address amounts to mutable.HashMap.
+  private val internalResources: Map[String, HashMap[String, Long]] = {
+    resources.map { case (rName, addressAmounts) =>
+      rName -> HashMap(addressAmounts.map { case (address, amount) =>
+        address -> (amount * RESOURCE_TOTAL_AMOUNT).toLong
+      }.toSeq: _*)
+    }
+  }
+
+  // Maps from a resource name to the number of its addresses.
+  lazy val resourceAmount: Map[String, Int] = internalResources.map { case (rName, addressMap) =>
+    rName -> addressMap.size
+  }
+
+  // Converts the internal long-based resources back to the public double-based view.
+  def availableResources: Map[String, Map[String, Double]] = {
+    internalResources.map { case (rName, addressMap) =>
+      rName -> addressMap.map { case (address, amount) =>
+        address -> amount.toDouble / RESOURCE_TOTAL_AMOUNT
+      }.toMap
+    }
+  }
+
+  // Acquires the assigned resources and updates the internal bookkeeping.
+  def acquire(assignedResource: Map[String, Map[String, Double]]): Unit = {
+    assignedResource.foreach { case (rName, taskResAmounts) =>
+      val availableResourceAmounts = internalResources.getOrElse(rName,
+        throw new SparkException(s"Try to acquire an address from $rName that doesn't exist"))
+      taskResAmounts.foreach { case (address, amount) =>
+        val prevInternalTotalAmount = availableResourceAmounts.getOrElse(address,
+          throw new SparkException(s"Try to acquire an address that doesn't exist. $rName " +
+            s"address $address doesn't exist."))
+
+        val internalTaskAmount = (amount * RESOURCE_TOTAL_AMOUNT).toLong
+        val internalLeft = prevInternalTotalAmount - internalTaskAmount
+        val realLeft = internalLeft.toDouble / RESOURCE_TOTAL_AMOUNT
+        if (realLeft < 0) {
+          throw new SparkException(s"The remaining amount ${realLeft} " +
+            s"after acquiring $rName address $address should be >= 0")
+        }
+        internalResources(rName)(address) = internalLeft
+        // scalastyle:off println
+        println(s"Acquired. left ${realLeft}")
+        // scalastyle:on println
+      }
+    }
+  }
+
+  // Releases the assigned resources and updates the internal bookkeeping.
+  def release(assignedResource: Map[String, Map[String, Double]]): Unit = {
+    assignedResource.foreach { case (rName, taskResAmounts) =>
+      val availableResourceAmounts = internalResources.getOrElse(rName,
+        throw new SparkException(s"Try to release an address from $rName that doesn't exist"))
+      taskResAmounts.foreach { case (address, amount) =>
+        val prevInternalTotalAmount = availableResourceAmounts.getOrElse(address,
+          throw new SparkException(s"Try to release an address that is not assigned. $rName " +
+            s"address $address is not assigned."))
+        val internalTaskAmount = (amount * RESOURCE_TOTAL_AMOUNT).toLong
+        val internalTotal = prevInternalTotalAmount + internalTaskAmount
+        if (internalTotal > RESOURCE_TOTAL_AMOUNT) {
+          throw new SparkException(s"The total amount " +
+            s"${internalTotal.toDouble / RESOURCE_TOTAL_AMOUNT} " +
+            s"after releasing $rName address $address should be <= 1.0")
+        }
+        internalResources(rName)(address) = internalTotal
+        // scalastyle:off println

Review Comment:
   I will remove the debug log in the next commit
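
   And for context on the invariants these methods check, a hedged usage sketch, assuming the class exactly as defined in this PR (it is private[spark], so this would have to live under an org.apache.spark package):

   ```scala
   // Hypothetical usage; assumes ExecutorResourcesAmounts from this PR.
   val execAmounts = new ExecutorResourcesAmounts(
     Map("gpu" -> Map("0" -> 0.2, "1" -> 1.0)))

   val assigned = Map("gpu" -> Map("1" -> 0.5))
   execAmounts.acquire(assigned)  // gpu address "1" drops from 1.0 to 0.5
   execAmounts.release(assigned)  // gpu address "1" is restored to 1.0

   // Violating either invariant throws SparkException:
   //   acquire(Map("gpu" -> Map("0" -> 0.5)))  // 0.2 - 0.5 < 0
   //   release(Map("gpu" -> Map("1" -> 0.1)))  // 1.0 + 0.1 > 1.0

   println(execAmounts.availableResources)
   // Map(gpu -> Map(0 -> 0.2, 1 -> 1.0))
   ```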



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

