yabola commented on code in PR #7184:
URL: https://github.com/apache/kyuubi/pull/7184#discussion_r2329138906


##########
extensions/spark/kyuubi-spark-lineage/src/main/scala/org/apache/kyuubi/plugin/lineage/helper/SparkSQLLineageParseHelper.scala:
##########
@@ -53,18 +54,27 @@ trait LineageParser {
   type AttributeMap[A] = ListMap[Attribute, A]
 
   def parse(plan: LogicalPlan): Lineage = {
-    val columnsLineage =
-      extractColumnsLineage(plan, ListMap[Attribute, AttributeSet]()).toList.collect {
-        case (k, attrs) =>
-          k.name -> attrs.map(attr => (attr.qualifier :+ attr.name).mkString(".")).toSet
+    val inputTablesByPlan = mutable.HashSet[String]()
+    val columnsLineage = extractColumnsLineage(
+      plan,
+      ListMap[Attribute, AttributeSet](),
+      inputTablesByPlan).toList.collect {
+      case (k, attrs) =>
+        k.name -> attrs.map(attr => (attr.qualifier :+ attr.name).mkString(".")).toSet
+    }
+    val (inputTablesByColumn, outputTables) = columnsLineage
+      .foldLeft((List[String](), List[String]())) {
+        case ((inputs, outputs), (out, in)) =>
+          val x = (inputs ++ in.map(_.split('.').init.mkString("."))).filter(_.nonEmpty)
+          val y = outputs ++ List(out.split('.').init.mkString(".")).filter(_.nonEmpty)
+          (x, y)
       }
-    val (inputTables, outputTables) = columnsLineage.foldLeft((List[String](), List[String]())) {
-      case ((inputs, outputs), (out, in)) =>
-        val x = (inputs ++ in.map(_.split('.').init.mkString("."))).filter(_.nonEmpty)
-        val y = outputs ++ List(out.split('.').init.mkString(".")).filter(_.nonEmpty)
-        (x, y)
+    if (SparkContextHelper.getConf(LineageConf.LEGACY_COLLECT_INPUT_TABLES_ENABLED)) {
+      Lineage(inputTablesByColumn.distinct, outputTables.distinct, columnsLineage)
+    } else {
+      val filteredInputTables = inputTablesByPlan -- outputTables.distinct

Review Comment:
   I also filtered out the outputTables here. Some plans, such as MERGE INTO, insert a table's own data back into itself, but Databricks does not count that table as an input table. I am not sure if this is appropriate.
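   
   To illustrate the concern with a minimal, self-contained sketch (the table names `db.target` and `db.source` are hypothetical, standing in for what the plan walk would collect for a self-referencing MERGE INTO):
   
   ```scala
   import scala.collection.mutable
   
   // For a statement like `MERGE INTO db.target USING db.source ON ...`,
   // the target table is also read during the merge, so the plan-level
   // walk collects it as an input alongside the real source:
   val inputTablesByPlan = mutable.HashSet("db.target", "db.source")
   val outputTables = List("db.target")
   
   // Subtracting the outputs (the same expression the diff uses) drops the
   // self-referencing target, matching the Databricks behavior noted above:
   val filteredInputTables = inputTablesByPlan -- outputTables.distinct
   // filteredInputTables now contains only "db.source"
   ```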



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

