cloud-fan commented on code in PR #47252:
URL: https://github.com/apache/spark/pull/47252#discussion_r1687462052


##########
sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/ColumnType.scala:
##########
@@ -815,6 +815,44 @@ private[columnar] object CALENDAR_INTERVAL extends 
ColumnType[CalendarInterval]
   }
 }
 
+/**
+ * Used to append/extract Java VariantVals into/from the underlying 
[[ByteBuffer]] of a column.
+ *
+ * Variants are encoded in `append` as:
+ * | total data size (excluding this portion) | value size | value binary | 
metadata binary |
+ * and are only expected to be decoded in `extract`.
+ */
+private[columnar] object VARIANT
+  extends ColumnType[VariantVal] with DirectCopyColumnType[VariantVal] {
+  override def dataType: PhysicalDataType = PhysicalVariantType
+
+  /** Chosen to match the default size set in `VariantType`. */
+  override def defaultSize: Int = 2048
+
+  override def getField(row: InternalRow, ordinal: Int): VariantVal = 
row.getVariant(ordinal)
+
+  override def setField(row: InternalRow, ordinal: Int, value: VariantVal): 
Unit =
+    row.update(ordinal, value)
+
+  override def append(v: VariantVal, buffer: ByteBuffer): Unit = {
+    val varLenSize: Int = 4 + v.getValue().length + v.getMetadata().length
+    ByteBufferHelper.putInt(buffer, varLenSize)
+    ByteBufferHelper.putInt(buffer, v.getValue().length)

Review Comment:
   Why not simply write one int for the value size and one int for the metadata size, instead of a combined total-size prefix?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


---------------------------------------------------------------------
To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org
For additional commands, e-mail: reviews-h...@spark.apache.org

Reply via email to