Github user viirya commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21847#discussion_r206699403

    --- Diff: external/avro/src/main/scala/org/apache/spark/sql/avro/AvroSerializer.scala ---
    @@ -165,16 +186,117 @@ class AvroSerializer(rootCatalystType: DataType, rootAvroType: Schema, nullable:
         result
       }
     
    -  private def resolveNullableType(avroType: Schema, nullable: Boolean): Schema = {
    -    if (nullable) {
    +  // Resolves an Avro union against a supplied DataType, e.g. a LongType compared against
    +  // ["null", "long"] should return a schema of type Schema.Type.LONG.
    +  // This function also handles resolving a DataType against unions of 2 or more types, e.g.
    +  // an IntegerType resolved against ["int", "long", "null"] will correctly return a schema of
    +  // type Schema.Type.LONG.
    +  private def resolveUnionType(avroType: Schema, catalystType: DataType,
    +      nullable: Boolean): Schema = {
    +    if (avroType.getType == Type.UNION) {
           // avro uses union to represent nullable type.
    -      val fields = avroType.getTypes.asScala
    -      assert(fields.length == 2)
    -      val actualType = fields.filter(_.getType != NULL)
    -      assert(actualType.length == 1)
    +      val fieldTypes = avroType.getTypes.asScala
    +
    +      // If we're nullable, we need to have at least two types. Cases with more than two types
    +      // are captured in test("read read-write, read-write w/ schema, read") w/ test.avro input
    +      if (nullable && fieldTypes.length < 2) {
    +        throw new IncompatibleSchemaException(
    +          s"Cannot resolve nullable ${catalystType} against union type ${avroType}")
    +      }
    +
    +      val actualType = catalystType match {
    +        case NullType => fieldTypes.filter(_.getType == Type.NULL)
    +        case BooleanType => fieldTypes.filter(_.getType == Type.BOOLEAN)
    +        case ByteType => fieldTypes.filter(_.getType == Type.INT)
    +        case BinaryType => fieldTypes
    +          .filter(x => x.getType == Type.BYTES || x.getType == Type.FIXED)
    +        case ShortType | IntegerType => fieldTypes.filter(_.getType == Type.INT)
    +        case LongType => fieldTypes.filter(_.getType == Type.LONG)
    +        case FloatType => fieldTypes.filter(_.getType == Type.FLOAT)
    +        case DoubleType => fieldTypes.filter(_.getType == Type.DOUBLE)
    +        case d: DecimalType => fieldTypes.filter(_.getType == Type.STRING)
    +        case StringType => fieldTypes
    +          .filter(x => x.getType == Type.STRING || x.getType == Type.ENUM)
    +        case DateType => fieldTypes
    +          .filter(x => x.getType == Type.INT || x.getType == Type.LONG)
    --- End diff --
    
    I asked this in a previous comment: why do we need to consider long? I think `DateType` uses int.
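    For context on that point, here is a minimal sketch (not from the PR; it assumes only the standard `org.apache.avro` `LogicalTypes` API from Avro 1.8+) showing that Avro represents dates as an `int` annotated with the `date` logical type, i.e. days since the Unix epoch:

    ```scala
    import org.apache.avro.{LogicalTypes, Schema}

    object AvroDateBackingType {
      def main(args: Array[String]): Unit = {
        // Avro's `date` logical type annotates an underlying INT schema;
        // the stored value is the number of days since the Unix epoch.
        val dateSchema: Schema =
          LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT))

        // Prints: {"type":"int","logicalType":"date"}
        println(dateSchema)

        // The physical type is INT, not LONG, which is why filtering a
        // union for Type.LONG on behalf of DateType looks questionable.
        assert(dateSchema.getType == Schema.Type.INT)
      }
    }
    ```

    Per the Avro spec, `date` is defined on top of `int`; it is the timestamp logical types (`timestamp-millis`, `timestamp-micros`) that are backed by `long`.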