Github user jkbradley commented on a diff in the pull request:

    https://github.com/apache/spark/pull/12023#discussion_r57674671
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/tree/treeModels.scala ---
    @@ -238,3 +238,128 @@ private[ml] object DecisionTreeModelReadWrite {
         finalNodes.head
       }
     }
    +
    +private[ml] object RandomForestModelReadWrite {
    +
    +  /**
    +    * Info for a [[org.apache.spark.ml.tree.Split]]
    +    *
    +    * @param featureIndex  Index of feature split on
    +    * @param leftCategoriesOrThreshold  For categorical feature, set of leftCategories.
    +    *                                   For continuous feature, threshold.
    +    * @param numCategories  For categorical feature, number of categories.
    +    *                       For continuous feature, -1.
    +    */
    +  case class SplitData(
    +                        featureIndex: Int,
    +                        leftCategoriesOrThreshold: Array[Double],
    +                        numCategories: Int) {
    +
    +    def getSplit: Split = {
    +      if (numCategories != -1) {
    +        new CategoricalSplit(featureIndex, leftCategoriesOrThreshold, numCategories)
    +      } else {
    +        assert(leftCategoriesOrThreshold.length == 1, s"DecisionTree split data expected" +
    +          s" 1 threshold for ContinuousSplit, but found thresholds: " +
    +          leftCategoriesOrThreshold.mkString(", "))
    +        new ContinuousSplit(featureIndex, leftCategoriesOrThreshold(0))
    +      }
    +    }
    +  }
    +
    +  object SplitData {
    +    def apply(split: Split): SplitData = split match {
    +      case s: CategoricalSplit =>
    +        SplitData(s.featureIndex, s.leftCategories, s.numCategories)
    +      case s: ContinuousSplit =>
    +        SplitData(s.featureIndex, Array(s.threshold), -1)
    +    }
    +  }
    +
    +  /**
    +    * Info for a [[Node]]
    +    *
    +    * @param treeID  Index used for tree identification in RandomForest
    +    * @param id  Index used for tree reconstruction.  Indices follow a pre-order traversal.
    +    * @param impurityStats  Stats array.  Impurity type is stored in metadata.
    +    * @param gain  Gain, or arbitrary value if leaf node.
    +    * @param leftChild  Left child index, or arbitrary value if leaf node.
    +    * @param rightChild  Right child index, or arbitrary value if leaf node.
    +    * @param split  Split info, or arbitrary value if leaf node.
    +    */
    +  case class NodeData(
    +                       treeID: Int,
    +                       id: Int,
    +                       prediction: Double,
    +                       impurity: Double,
    +                       impurityStats: Array[Double],
    +                       gain: Double,
    +                       leftChild: Int,
    +                       rightChild: Int,
    +                       split: SplitData)
    +
    +  object NodeData {
    +    /**
    +      * Create [[NodeData]] instances for this node and all children.
    +      *
    +      * @param id  Current ID.  IDs are assigned via a pre-order traversal.
    +      * @return (sequence of nodes in pre-order traversal order, largest ID in subtree)
    +      *         The nodes are returned in pre-order traversal (root first) so that it is easy to
    +      *         get the ID of the subtree's root node.
    +      */
    +    def build(node: Node, treeID: Int, id: Int): (Seq[NodeData], Int) = node match {
    +      case n: InternalNode =>
    +        val (leftNodeData, leftIdx) = build(n.leftChild, treeID, id + 1)
    +        val (rightNodeData, rightIdx) = build(n.rightChild, treeID, leftIdx + 1)
    +        val thisNodeData = NodeData(treeID, id, n.prediction, n.impurity, n.impurityStats.stats,
    +          n.gain, leftNodeData.head.id, rightNodeData.head.id, SplitData(n.split))
    +        (thisNodeData +: (leftNodeData ++ rightNodeData), rightIdx)
    +      case _: LeafNode =>
    +        (Seq(NodeData(treeID, id, node.prediction, node.impurity, node.impurityStats.stats,
    +          -1.0, -1, -1, SplitData(-1, Array.empty[Double], -1))),
    +          id)
    +    }
    +  }
    +
    +  def loadTreeNodes(
    --- End diff --
    
    This should not be copied either.  Please reuse the one for trees.
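    
    For example, just a rough sketch of the direction (EnsembleNodeData is a hypothetical name, and this assumes the existing single-tree NodeData.build(node: Node, id: Int) helper in DecisionTreeModelReadWrite), something like:
    
        // Sketch only: wrap the existing single-tree NodeData with a treeID instead of
        // duplicating NodeData/SplitData for forests.
        case class EnsembleNodeData(treeID: Int, nodeData: NodeData)
    
        object EnsembleNodeData {
          /** Build rows for one tree by delegating to the existing per-tree builder. */
          def build(tree: DecisionTreeModel, treeID: Int): Seq[EnsembleNodeData] = {
            val (nodeData, _) = NodeData.build(tree.rootNode, 0)
            nodeData.map(EnsembleNodeData(treeID, _))
          }
        }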

