[ https://issues.apache.org/jira/browse/SPARK-35252?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

lynn updated SPARK-35252:
-------------------------
    Description: 
The code of "MyPartitionReaderFactory", the PartitionReaderFactory implementation of my DataSourceV2 source:
{code:java}
// Implementation class
package com.lynn.spark.sql.v2

import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.vectorized.ColumnarBatch

case class MyPartitionReaderFactory(sqlConf: SQLConf,
                                    dataSchema: StructType,
                                    readSchema: StructType)
  extends PartitionReaderFactory with Logging {

  // SQLConf.getConf expects a ConfigEntry, so a plain string key must be
  // read with getConfString.
  def batchSize = sqlConf.getConfString("spark.sql.my.vectorized.batch.size")

  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    MyRowReader(batchSize, dataSchema, readSchema)
  }

  override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
    if (!supportColumnarReads(partition))
      throw new UnsupportedOperationException("Cannot create columnar reader.")

    // TODO: delete this line. Debug output only; per this report, sqlConf
    // arrives here as null, so this call throws a NullPointerException.
    println(sqlConf.getAllConfs)

    MyColumnReader(batchSize, dataSchema, readSchema)
  }

  override def supportColumnarReads(partition: InputPartition) = true

}
{code}
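
One way to sidestep the dependency on SQLConf inside the factory is to resolve the needed values on the driver, at planning time, and hand the factory only plain serializable fields. The sketch below is illustrative, not part of the reproducer: MyScan, MyPartitionReaderFactory2, and the "4096" default are assumed names/values, and the conf key is the custom option from the code above.
{code:java}
// Sketch: read conf values on the driver and ship only primitives.
// MyScan and MyPartitionReaderFactory2 are hypothetical names; "4096" is
// an assumed default for the reporter's custom option.
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.read.{Batch, InputPartition, PartitionReader, PartitionReaderFactory, Scan}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType

class MyScan(dataSchema: StructType, requiredSchema: StructType) extends Scan with Batch {

  override def readSchema(): StructType = requiredSchema

  override def toBatch: Batch = this

  override def planInputPartitions(): Array[InputPartition] = Array.empty

  // Runs on the driver, where the session's SQLConf is available.
  override def createReaderFactory(): PartitionReaderFactory = {
    val batchSize =
      SQLConf.get.getConfString("spark.sql.my.vectorized.batch.size", "4096").toInt
    MyPartitionReaderFactory2(batchSize, dataSchema, requiredSchema)
  }
}

// The factory carries only serializable fields (an Int and two StructTypes),
// so nothing is lost when Spark serializes it to the executors.
case class MyPartitionReaderFactory2(batchSize: Int,
                                     dataSchema: StructType,
                                     readSchema: StructType)
  extends PartitionReaderFactory {

  override def createReader(partition: InputPartition): PartitionReader[InternalRow] =
    new PartitionReader[InternalRow] {
      // Empty reader, just to keep the sketch self-contained.
      override def next(): Boolean = false
      override def get(): InternalRow = throw new NoSuchElementException("empty")
      override def close(): Unit = ()
    }
}
{code}
Spark's own file sources likewise build their reader factories on the driver inside Scan.createReaderFactory(), where the session state is still available.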

> The sqlConf parameter of a DataSourceV2 PartitionReaderFactory implementation 
> class is null
> -------------------------------------------------------------------------------------
>
>                 Key: SPARK-35252
>                 URL: https://issues.apache.org/jira/browse/SPARK-35252
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 3.0.2
>            Reporter: lynn
>            Priority: Major
>
> The code of "MyPartitionReaderFactory", the PartitionReaderFactory implementation of my DataSourceV2 source:
> {code:java}
> // Implementation class
> package com.lynn.spark.sql.v2
>
> import org.apache.spark.internal.Logging
> import org.apache.spark.sql.catalyst.InternalRow
> import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
> import org.apache.spark.sql.internal.SQLConf
> import org.apache.spark.sql.types.StructType
> import org.apache.spark.sql.vectorized.ColumnarBatch
>
> case class MyPartitionReaderFactory(sqlConf: SQLConf,
>                                     dataSchema: StructType,
>                                     readSchema: StructType)
>   extends PartitionReaderFactory with Logging {
>
>   // SQLConf.getConf expects a ConfigEntry, so a plain string key must be
>   // read with getConfString.
>   def batchSize = sqlConf.getConfString("spark.sql.my.vectorized.batch.size")
>
>   override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
>     MyRowReader(batchSize, dataSchema, readSchema)
>   }
>
>   override def createColumnarReader(partition: InputPartition): PartitionReader[ColumnarBatch] = {
>     if (!supportColumnarReads(partition))
>       throw new UnsupportedOperationException("Cannot create columnar reader.")
>
>     // TODO: delete this line. Debug output only; per this report, sqlConf
>     // arrives here as null, so this call throws a NullPointerException.
>     println(sqlConf.getAllConfs)
>
>     MyColumnReader(batchSize, dataSchema, readSchema)
>   }
>
>   override def supportColumnarReads(partition: InputPartition) = true
>
> }
> {code}


