[ https://issues.apache.org/jira/browse/HIVE-20616?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16785262#comment-16785262 ]

Daniel Dai commented on HIVE-20616:
-----------------------------------

PARAM_VALUE is already expanded in HIVE-20221. Do you want to add this change 
to a particular upgrade path?
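
For context, widening that column on MySQL amounts to a single ALTER statement in whichever metastore upgrade script the change is attached to. The statement below is only a sketch; the MEDIUMTEXT target type is an assumption for illustration, not necessarily what HIVE-20221 actually uses:

{code}
-- Sketch only: the kind of statement a MySQL metastore upgrade step would
-- carry to widen PARTITION_PARAMS.PARAM_VALUE beyond varchar(4000).
-- MEDIUMTEXT is an assumed target type, not confirmed from HIVE-20221.
ALTER TABLE PARTITION_PARAMS MODIFY COLUMN PARAM_VALUE MEDIUMTEXT;
{code}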

> Dynamic Partition Insert failed if PART_VALUE exceeds 4000 chars
> ----------------------------------------------------------------
>
>                 Key: HIVE-20616
>                 URL: https://issues.apache.org/jira/browse/HIVE-20616
>             Project: Hive
>          Issue Type: Bug
>            Reporter: Rajkumar Singh
>            Assignee: Rajkumar Singh
>            Priority: Major
>         Attachments: HIVE-20616.patch
>
>
> With MySQL as the metastore DB, PARTITION_PARAMS.PARAM_VALUE is defined as
> varchar(4000):
> {code}
> describe PARTITION_PARAMS;
> +-------------+---------------+------+-----+---------+-------+
> | Field       | Type          | Null | Key | Default | Extra |
> +-------------+---------------+------+-----+---------+-------+
> | PART_ID     | bigint(20)    | NO   | PRI | NULL    |       |
> | PARAM_KEY   | varchar(256)  | NO   | PRI | NULL    |       |
> | PARAM_VALUE | varchar(4000) | YES  |     | NULL    |       |
> +-------------+---------------+------+-----+---------+-------+
> {code}
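> A minimal way to reproduce this (the table and column names below are made up
> for illustration, not taken from the original report) is a dynamic partition
> insert whose computed partition value is longer than 4000 characters:
> {code}
> -- Hypothetical reproduction sketch; src, tgt and their columns are invented.
> SET hive.exec.dynamic.partition=true;
> SET hive.exec.dynamic.partition.mode=nonstrict;
>
> CREATE TABLE src (id INT);
> CREATE TABLE tgt (id INT) PARTITIONED BY (part_col STRING);
>
> -- repeat() produces a partition value well past the varchar(4000) limit
> INSERT INTO TABLE tgt PARTITION (part_col)
> SELECT id, repeat('x', 5000) AS part_col FROM src;
> {code}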
> The MoveTask then fails when the partition value exceeds 4000 chars, because
> the value written to PARTITION_PARAMS.PARAM_VALUE no longer fits:
> {code}
> org.datanucleus.store.rdbms.exceptions.MappedDatastoreException: INSERT INTO `PARTITION_PARAMS` (`PARAM_VALUE`,`PART_ID`,`PARAM_KEY`) VALUES (?,?,?)
>      at org.datanucleus.store.rdbms.scostore.JoinMapStore.internalPut(JoinMapStore.java:1074)
>      at org.datanucleus.store.rdbms.scostore.JoinMapStore.putAll(JoinMapStore.java:224)
>      at org.datanucleus.store.rdbms.mapping.java.MapMapping.postInsert(MapMapping.java:158)
>      at org.datanucleus.store.rdbms.request.InsertRequest.execute(InsertRequest.java:522)
>      at org.datanucleus.store.rdbms.RDBMSPersistenceHandler.insertObjectInTable(RDBMSPersistenceHandler.java:162)
>      at org.datanucleus.store.rdbms.RDBMSPersistenceHandler.insertObject(RDBMSPersistenceHandler.java:138)
>      at org.datanucleus.state.StateManagerImpl.internalMakePersistent(StateManagerImpl.java:3363)
>      at org.datanucleus.state.StateManagerImpl.makePersistent(StateManagerImpl.java:3339)
>      at org.datanucleus.ExecutionContextImpl.persistObjectInternal(ExecutionContextImpl.java:2080)
>      at org.datanucleus.ExecutionContextImpl.persistObjectWork(ExecutionContextImpl.java:1923)
>      at org.datanucleus.ExecutionContextImpl.persistObject(ExecutionContextImpl.java:1778)
>      at org.datanucleus.ExecutionContextThreadedImpl.persistObject(ExecutionContextThreadedImpl.java:217)
>      at org.datanucleus.api.jdo.JDOPersistenceManager.jdoMakePersistent(JDOPersistenceManager.java:724)
>      at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistent(JDOPersistenceManager.java:749)
>      at org.apache.hadoop.hive.metastore.ObjectStore.addPartition(ObjectStore.java:2442)
>      at sun.reflect.GeneratedMethodAccessor56.invoke(Unknown Source)
>      at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>      at java.lang.reflect.Method.invoke(Method.java:498)
>      at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:97)
>      at com.sun.proxy.$Proxy32.addPartition(Unknown Source)
>      at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.add_partition_core(HiveMetaStore.java:3976)
>      at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.add_partition_with_environment_context(HiveMetaStore.java:4032)
>      at sun.reflect.GeneratedMethodAccessor54.invoke(Unknown Source)
>      at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>      at java.lang.reflect.Method.invoke(Method.java:498)
>      at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invokeInternal(RetryingHMSHandler.java:147)
>      at org.apache.hadoop.hive.metastore.RetryingHMSHandler.invoke(RetryingHMSHandler.java:108)
>      at com.sun.proxy.$Proxy34.add_partition_with_environment_context(Unknown Source)
>      at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$add_partition_with_environment_context.getResult(ThriftHiveMetastore.java:15528)
>      at org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore$Processor$add_partition_with_environment_context.getResult(ThriftHiveMetastore.java:15512)
>      at org.apache.thrift.ProcessFunction.process(ProcessFunction.java:39)
>      at org.apache.thrift.TBaseProcessor.process(TBaseProcessor.java:39)
>      at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:636)
>      at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor$1.run(HadoopThriftAuthBridge.java:631)
>      at java.security.AccessController.doPrivileged(Native Method)
>      at javax.security.auth.Subject.doAs(Subject.java:422)
>      at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1688)
>      at org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge$Server$TUGIAssumingProcessor.process(HadoopThriftAuthBridge.java:631)
>      at org.apache.thrift.server.TThreadPoolServer$WorkerProcess.run(TThreadPoolServer.java:286)
> Caused by: com.mysql.jdbc.MysqlDataTruncation: Data truncation: Data too long for column 'PARAM_VALUE' at row 1
>      at com.mysql.jdbc.MysqlIO.checkErrorPacket(MysqlIO.java:4185)
>      at com.mysql.jdbc.MysqlIO.checkErrorPacket(MysqlIO.java:4119)
>      at com.mysql.jdbc.MysqlIO.sendCommand(MysqlIO.java:2570)
>      at com.mysql.jdbc.MysqlIO.sqlQueryDirect(MysqlIO.java:2731)
>      at com.mysql.jdbc.ConnectionImpl.execSQL(ConnectionImpl.java:2820)
>      at com.mysql.jdbc.PreparedStatement.executeInternal(PreparedStatement.java:2159)
>      at com.mysql.jdbc.PreparedStatement.executeUpdate(PreparedStatement.java:2462)
>      at com.mysql.jdbc.PreparedStatement.executeUpdate(PreparedStatement.java:2379)
>      at com.mysql.jdbc.PreparedStatement.executeUpdate(PreparedStatement.java:2363)
>      at com.zaxxer.hikari.pool.ProxyPreparedStatement.executeUpdate(ProxyPreparedStatement.java:61)
>      at com.zaxxer.hikari.pool.HikariProxyPreparedStatement.executeUpdate(HikariProxyPreparedStatement.java)
>      at org.datanucleus.store.rdbms.ParamLoggingPreparedStatement.executeUpdate(ParamLoggingPreparedStatement.java:393)
>      at org.datanucleus.store.rdbms.SQLController.executeStatementUpdate(SQLController.java:431)
>      at org.datanucleus.store.rdbms.scostore.JoinMapStore.internalPut(JoinMapStore.java:1065)
>      ... 41 more
> {code}



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
