Github user imatiach-msft commented on a diff in the pull request:

    https://github.com/apache/spark/pull/22328#discussion_r214975055
  
    --- Diff: mllib/src/main/scala/org/apache/spark/ml/source/image/ImageFileFormat.scala ---
    @@ -0,0 +1,109 @@
    +/*
    + * Licensed to the Apache Software Foundation (ASF) under one or more
    + * contributor license agreements.  See the NOTICE file distributed with
    + * this work for additional information regarding copyright ownership.
    + * The ASF licenses this file to You under the Apache License, Version 2.0
    + * (the "License"); you may not use this file except in compliance with
    + * the License.  You may obtain a copy of the License at
    + *
    + *    http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.spark.ml.source.image
    +
    +import com.google.common.io.{ByteStreams, Closeables}
    +import org.apache.hadoop.conf.Configuration
    +import org.apache.hadoop.fs.{FileStatus, Path}
    +import org.apache.hadoop.mapreduce.Job
    +
    +import org.apache.spark.ml.image.ImageSchema
    +import org.apache.spark.sql.SparkSession
    +import org.apache.spark.sql.catalyst.InternalRow
    +import org.apache.spark.sql.catalyst.encoders.RowEncoder
    +import org.apache.spark.sql.catalyst.expressions.{AttributeReference, UnsafeRow}
    +import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
    +import org.apache.spark.sql.execution.datasources.{DataSource, FileFormat, OutputWriterFactory, PartitionedFile}
    +import org.apache.spark.sql.sources.{DataSourceRegister, Filter}
    +import org.apache.spark.sql.types.StructType
    +import org.apache.spark.util.SerializableConfiguration
    +
    +
    +private[image] class ImageFileFormatOptions(
    +    @transient private val parameters: CaseInsensitiveMap[String]) extends Serializable {
    +
    +  def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))
    +
    +  val dropImageFailures = parameters.getOrElse("dropImageFailures", "false").toBoolean
    +}
    +
    +private[image] class ImageFileFormat extends FileFormat with DataSourceRegister {
    +
    +  override def inferSchema(
    +      sparkSession: SparkSession,
    +      options: Map[String, String],
    +      files: Seq[FileStatus]): Option[StructType] = Some(ImageSchema.imageSchema)
    +
    +  override def prepareWrite(
    +      sparkSession: SparkSession,
    +      job: Job, options: Map[String, String],
    +      dataSchema: StructType): OutputWriterFactory = {
    +    throw new UnsupportedOperationException(
    +      s"prepareWrite is not supported for image data source")
    +  }
    +
    +  override def shortName(): String = "image"
    +
    +  override protected def buildReader(
    +      sparkSession: SparkSession,
    +      dataSchema: StructType,
    +      partitionSchema: StructType,
    +      requiredSchema: StructType,
    +      filters: Seq[Filter],
    +      options: Map[String, String],
    +      hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
    --- End diff --
    
    Should the sampling option be ported as well? It seemed like an important option in case users didn't want to load all images.
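
    For context, a hypothetical port of the sampling option could mirror how `dropImageFailures` is parsed above. Below is a minimal sketch only; the `samplingRatio` option name, default, and range check are assumptions, not something in this PR, and it reuses imports already shown in the diff (e.g. `CaseInsensitiveMap`):

        private[image] class ImageFileFormatOptions(
            @transient private val parameters: CaseInsensitiveMap[String]) extends Serializable {

          def this(parameters: Map[String, String]) = this(CaseInsensitiveMap(parameters))

          // Existing option from the diff: drop images that fail to decode instead of failing the job.
          val dropImageFailures = parameters.getOrElse("dropImageFailures", "false").toBoolean

          // Hypothetical option: fraction of images to load, in (0.0, 1.0]; 1.0 keeps everything.
          val samplingRatio = parameters.getOrElse("samplingRatio", "1.0").toDouble
          require(samplingRatio > 0.0 && samplingRatio <= 1.0,
            s"samplingRatio ($samplingRatio) must be in (0.0, 1.0]")
        }

    Callers could then opt in with something like `spark.read.format("image").option("samplingRatio", "0.1").load(path)`, provided buildReader (or the file listing step) actually uses the ratio to subsample the input files.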


---
