Repository: arrow
Updated Branches:
  refs/heads/master ea1b67ceb -> 4108bda82


ARROW-1291: [Python] Cast non-string DataFrame columns to strings in RecordBatch/Table.from_pandas

Author: Wes McKinney <[email protected]>

Closes #911 from wesm/ARROW-1291 and squashes the following commits:

d442f3bb [Wes McKinney] Cast non-string DataFrame columns to strings in RecordBatch/Table.from_pandas


Project: http://git-wip-us.apache.org/repos/asf/arrow/repo
Commit: http://git-wip-us.apache.org/repos/asf/arrow/commit/4108bda8
Tree: http://git-wip-us.apache.org/repos/asf/arrow/tree/4108bda8
Diff: http://git-wip-us.apache.org/repos/asf/arrow/diff/4108bda8

Branch: refs/heads/master
Commit: 4108bda82b6574adc95e371b328cee748cd1fbce
Parents: ea1b67c
Author: Wes McKinney <[email protected]>
Authored: Sat Jul 29 13:55:15 2017 -0400
Committer: Wes McKinney <[email protected]>
Committed: Sat Jul 29 13:55:15 2017 -0400

----------------------------------------------------------------------
 python/pyarrow/pandas_compat.py             | 96 ++++++++++++++++--------
 python/pyarrow/table.pxi                    | 49 +-----------
 python/pyarrow/tests/test_convert_pandas.py |  5 ++
 python/pyarrow/tests/test_ipc.py            |  2 +-
 4 files changed, 74 insertions(+), 78 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/arrow/blob/4108bda8/python/pyarrow/pandas_compat.py
----------------------------------------------------------------------
diff --git a/python/pyarrow/pandas_compat.py b/python/pyarrow/pandas_compat.py
index 9b2a5c4..cd7ad47 100644
--- a/python/pyarrow/pandas_compat.py
+++ b/python/pyarrow/pandas_compat.py
@@ -155,7 +155,7 @@ def index_level_name(index, i):
         return '__index_level_{:d}__'.format(i)
 
 
-def construct_metadata(df, index_levels, preserve_index, types):
+def construct_metadata(df, column_names, index_levels, preserve_index, types):
     """Returns a dictionary containing enough metadata to reconstruct a pandas
     DataFrame as an Arrow Table, including index columns.
 
@@ -170,41 +170,77 @@ def construct_metadata(df, index_levels, preserve_index, types):
     -------
     dict
     """
-    ncolumns = len(df.columns)
+    ncolumns = len(column_names)
     df_types = types[:ncolumns]
     index_types = types[ncolumns:ncolumns + len(index_levels)]
+
+    column_metadata = [
+        get_column_metadata(df[col_name], name=sanitized_name,
+                            arrow_type=arrow_type)
+        for col_name, sanitized_name, arrow_type in
+        zip(df.columns, column_names, df_types)
+    ]
+
+    if preserve_index:
+        index_column_names = [index_level_name(level, i)
+                              for i, level in enumerate(index_levels)]
+        index_column_metadata = [
+            get_column_metadata(level, name=index_level_name(level, i),
+                                arrow_type=arrow_type)
+            for i, (level, arrow_type) in enumerate(zip(index_levels,
+                                                        index_types))
+        ]
+    else:
+        index_column_names = index_column_metadata = []
+
     return {
-        b'pandas': json.dumps(
-            {
-                'index_columns': [
-                    index_level_name(level, i)
-                    for i, level in enumerate(index_levels)
-                ] if preserve_index else [],
-                'columns': [
-                    get_column_metadata(
-                        df[name],
-                        name=name,
-                        arrow_type=arrow_type
-                    )
-                    for name, arrow_type in zip(df.columns, df_types)
-                ] + (
-                    [
-                        get_column_metadata(
-                            level,
-                            name=index_level_name(level, i),
-                            arrow_type=arrow_type
-                        )
-                        for i, (level, arrow_type) in enumerate(
-                            zip(index_levels, index_types)
-                        )
-                    ] if preserve_index else []
-                ),
-                'pandas_version': pd.__version__,
-            }
-        ).encode('utf8')
+        b'pandas': json.dumps({
+            'index_columns': index_column_names,
+            'columns': column_metadata + index_column_metadata,
+            'pandas_version': pd.__version__
+        }).encode('utf8')
     }
 
 
+def dataframe_to_arrays(df, timestamps_to_ms, schema, preserve_index):
+    names = []
+    arrays = []
+    index_columns = []
+    types = []
+    type = None
+
+    if preserve_index:
+        n = len(getattr(df.index, 'levels', [df.index]))
+        index_columns.extend(df.index.get_level_values(i) for i in range(n))
+
+    for name in df.columns:
+        col = df[name]
+        if not isinstance(name, six.string_types):
+            name = str(name)
+
+        if schema is not None:
+            field = schema.field_by_name(name)
+            type = getattr(field, "type", None)
+
+        array = pa.Array.from_pandas(
+            col, type=type, timestamps_to_ms=timestamps_to_ms
+        )
+        arrays.append(array)
+        names.append(name)
+        types.append(array.type)
+
+    for i, column in enumerate(index_columns):
+        array = pa.Array.from_pandas(column, timestamps_to_ms=timestamps_to_ms)
+        arrays.append(array)
+        names.append(index_level_name(column, i))
+        types.append(array.type)
+
+    metadata = construct_metadata(
+        df, names, index_columns, preserve_index, types
+    )
+    return names, arrays, metadata
+
+
 def table_to_blockmanager(table, nthreads=1):
     import pandas.core.internals as _int
     from pyarrow.compat import DatetimeTZDtype

http://git-wip-us.apache.org/repos/asf/arrow/blob/4108bda8/python/pyarrow/table.pxi
----------------------------------------------------------------------
diff --git a/python/pyarrow/table.pxi b/python/pyarrow/table.pxi
index a9cb064..c1d5a50 100644
--- a/python/pyarrow/table.pxi
+++ b/python/pyarrow/table.pxi
@@ -317,51 +317,6 @@ cdef int _schema_from_arrays(
     return 0
 
 
-cdef tuple _dataframe_to_arrays(
-    df,
-    bint timestamps_to_ms,
-    Schema schema,
-    bint preserve_index
-):
-    cdef:
-        list names = []
-        list arrays = []
-        list index_columns = []
-        list types = []
-        DataType type = None
-        dict metadata
-        Py_ssize_t i
-        Py_ssize_t n
-
-    if preserve_index:
-        n = len(getattr(df.index, 'levels', [df.index]))
-        index_columns.extend(df.index.get_level_values(i) for i in range(n))
-
-    for name in df.columns:
-        col = df[name]
-        if schema is not None:
-            field = schema.field_by_name(name)
-            type = getattr(field, "type", None)
-
-        array = Array.from_pandas(
-            col, type=type, timestamps_to_ms=timestamps_to_ms
-        )
-        arrays.append(array)
-        names.append(name)
-        types.append(array.type)
-
-    for i, column in enumerate(index_columns):
-        array = Array.from_pandas(column, timestamps_to_ms=timestamps_to_ms)
-        arrays.append(array)
-        names.append(pdcompat.index_level_name(column, i))
-        types.append(array.type)
-
-    metadata = pdcompat.construct_metadata(
-        df, index_columns, preserve_index, types
-    )
-    return names, arrays, metadata
-
-
 cdef class RecordBatch:
     """
     Batch of rows of columns of equal length
@@ -570,7 +525,7 @@ cdef class RecordBatch:
         -------
         pyarrow.RecordBatch
         """
-        names, arrays, metadata = _dataframe_to_arrays(
+        names, arrays, metadata = pdcompat.dataframe_to_arrays(
             df, False, schema, preserve_index
         )
         return cls.from_arrays(arrays, names, metadata)
@@ -748,7 +703,7 @@ cdef class Table:
         >>> pa.Table.from_pandas(df)
         <pyarrow.lib.Table object at 0x7f05d1fb1b40>
         """
-        names, arrays, metadata = _dataframe_to_arrays(
+        names, arrays, metadata = pdcompat.dataframe_to_arrays(
             df,
             timestamps_to_ms=timestamps_to_ms,
             schema=schema,

http://git-wip-us.apache.org/repos/asf/arrow/blob/4108bda8/python/pyarrow/tests/test_convert_pandas.py
----------------------------------------------------------------------
diff --git a/python/pyarrow/tests/test_convert_pandas.py b/python/pyarrow/tests/test_convert_pandas.py
index 43e0bad..d488658 100644
--- a/python/pyarrow/tests/test_convert_pandas.py
+++ b/python/pyarrow/tests/test_convert_pandas.py
@@ -109,6 +109,11 @@ class TestPandasConversion(unittest.TestCase):
         df['a'] = df['a'].astype('category')
         self._check_pandas_roundtrip(df)
 
+    def test_non_string_columns(self):
+        df = pd.DataFrame({0: [1, 2, 3]})
+        table = pa.Table.from_pandas(df)
+        assert table.column(0).name == '0'
+
     def test_float_no_nulls(self):
         data = {}
         fields = []

http://git-wip-us.apache.org/repos/asf/arrow/blob/4108bda8/python/pyarrow/tests/test_ipc.py
----------------------------------------------------------------------
diff --git a/python/pyarrow/tests/test_ipc.py b/python/pyarrow/tests/test_ipc.py
index bcaca6d..3ad369c 100644
--- a/python/pyarrow/tests/test_ipc.py
+++ b/python/pyarrow/tests/test_ipc.py
@@ -360,7 +360,7 @@ def test_pandas_serialize_round_trip_multi_index():
 
 
 @pytest.mark.xfail(
-    raises=TypeError,
+    raises=AssertionError,
     reason='Non string columns are not supported',
 )
 def test_pandas_serialize_round_trip_not_string_columns():

Reply via email to