lidavidm commented on code in PR #1790:
URL: https://github.com/apache/arrow-adbc/pull/1790#discussion_r1583892085


##########
go/adbc/driver/snowflake/record_reader.go:
##########
@@ -480,20 +487,87 @@ func newRecordReader(ctx context.Context, alloc memory.Allocator, ld gosnowflake
                        }
                }
 
+               if ld.TotalRows() == 0 {
+                       return array.NewRecordReader(schema, []arrow.Record{})
+               }
+
                bldr := array.NewRecordBuilder(alloc, schema)
                defer bldr.Release()
 
-               rec, err := jsonDataToArrow(ctx, bldr, ld)
+               rec, err := jsonDataToArrow(ctx, bldr, rawData)
                if err != nil {
                        return nil, err
                }
                defer rec.Release()
 
-               if ld.TotalRows() != 0 {
-                       return array.NewRecordReader(schema, []arrow.Record{rec})
-               } else {
-                       return array.NewRecordReader(schema, []arrow.Record{})
+               results := []arrow.Record{rec}
+               for _, b := range batches {
+                       rdr, err := b.GetStream(ctx)
+                       if err != nil {
+                               return nil, adbc.Error{
+                                       Msg:  err.Error(),
+                                       Code: adbc.StatusInternal,
+                               }
+                       }
+                       defer rdr.Close()
+
+                       // the "JSON" data returned isn't valid JSON. Instead it is a list of
+                       // comma-delimited JSON lists containing every value as a string, except
+                       // for a JSON null to represent nulls. Thus we can't just use the existing
+                       // JSON parsing code in Arrow.

Review Comment:
   oh joy



##########
go/adbc/driver/snowflake/record_reader.go:
##########
@@ -480,20 +487,87 @@ func newRecordReader(ctx context.Context, alloc memory.Allocator, ld gosnowflake
                        }
                }
 
+               if ld.TotalRows() == 0 {
+                       return array.NewRecordReader(schema, []arrow.Record{})
+               }
+
                bldr := array.NewRecordBuilder(alloc, schema)
                defer bldr.Release()
 
-               rec, err := jsonDataToArrow(ctx, bldr, ld)
+               rec, err := jsonDataToArrow(ctx, bldr, rawData)
                if err != nil {
                        return nil, err
                }
                defer rec.Release()
 
-               if ld.TotalRows() != 0 {
-                       return array.NewRecordReader(schema, []arrow.Record{rec})
-               } else {
-                       return array.NewRecordReader(schema, []arrow.Record{})
+               results := []arrow.Record{rec}
+               for _, b := range batches {
+                       rdr, err := b.GetStream(ctx)
+                       if err != nil {
+                               return nil, adbc.Error{
+                                       Msg:  err.Error(),
+                                       Code: adbc.StatusInternal,
+                               }
+                       }
+                       defer rdr.Close()
+
+                       // the "JSON" data returned isn't valid JSON. Instead it is a list of
+                       // comma-delimited JSON lists containing every value as a string, except
+                       // for a JSON null to represent nulls. Thus we can't just use the existing
+                       // JSON parsing code in Arrow.
+                       data, err := io.ReadAll(rdr)
+                       if err != nil {
+                               return nil, adbc.Error{
+                                       Msg:  err.Error(),
+                                       Code: adbc.StatusInternal,
+                               }
+                       }
+
+                       if cap(rawData) >= int(b.NumRows()) {
+                               rawData = rawData[:b.NumRows()]

Review Comment:
   Why are we truncating here?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to