http://git-wip-us.apache.org/repos/asf/hive/blob/926c1e8e/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index b6a0893..1bdbbbf 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -374,6 +374,20 @@ import org.slf4j.LoggerFactory; public void add_dynamic_partitions(AddDynamicPartitions rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException; + public OptionalCompactionInfoStruct find_next_compact(String workerId) throws MetaException, org.apache.thrift.TException; + + public void update_compactor_state(CompactionInfoStruct cr, long txn_id) throws org.apache.thrift.TException; + + public List<String> find_columns_with_stats(CompactionInfoStruct cr) throws org.apache.thrift.TException; + + public void mark_cleaned(CompactionInfoStruct cr) throws MetaException, org.apache.thrift.TException; + + public void mark_compacted(CompactionInfoStruct cr) throws MetaException, org.apache.thrift.TException; + + public void mark_failed(CompactionInfoStruct cr) throws MetaException, org.apache.thrift.TException; + + public void set_hadoop_jobid(String jobId, long cq_id) throws org.apache.thrift.TException; + public NotificationEventResponse get_next_notification(NotificationEventRequest rqst) throws org.apache.thrift.TException; public CurrentNotificationEventId get_current_notificationEventId() throws org.apache.thrift.TException; @@ -808,6 +822,20 @@ import org.slf4j.LoggerFactory; public void add_dynamic_partitions(AddDynamicPartitions rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void find_next_compact(String workerId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void update_compactor_state(CompactionInfoStruct cr, long txn_id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void find_columns_with_stats(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void mark_cleaned(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void mark_compacted(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void mark_failed(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void set_hadoop_jobid(String jobId, long cq_id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_next_notification(NotificationEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void 
get_current_notificationEventId(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -5749,6 +5777,166 @@ import org.slf4j.LoggerFactory; return; } + public OptionalCompactionInfoStruct find_next_compact(String workerId) throws MetaException, org.apache.thrift.TException + { + send_find_next_compact(workerId); + return recv_find_next_compact(); + } + + public void send_find_next_compact(String workerId) throws org.apache.thrift.TException + { + find_next_compact_args args = new find_next_compact_args(); + args.setWorkerId(workerId); + sendBase("find_next_compact", args); + } + + public OptionalCompactionInfoStruct recv_find_next_compact() throws MetaException, org.apache.thrift.TException + { + find_next_compact_result result = new find_next_compact_result(); + receiveBase(result, "find_next_compact"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "find_next_compact failed: unknown result"); + } + + public void update_compactor_state(CompactionInfoStruct cr, long txn_id) throws org.apache.thrift.TException + { + send_update_compactor_state(cr, txn_id); + recv_update_compactor_state(); + } + + public void send_update_compactor_state(CompactionInfoStruct cr, long txn_id) throws org.apache.thrift.TException + { + update_compactor_state_args args = new update_compactor_state_args(); + args.setCr(cr); + args.setTxn_id(txn_id); + sendBase("update_compactor_state", args); + } + + public void recv_update_compactor_state() throws org.apache.thrift.TException + { + update_compactor_state_result result = new update_compactor_state_result(); + receiveBase(result, "update_compactor_state"); + return; + } + + public List<String> find_columns_with_stats(CompactionInfoStruct cr) throws org.apache.thrift.TException + { + send_find_columns_with_stats(cr); + return recv_find_columns_with_stats(); + } + + public void send_find_columns_with_stats(CompactionInfoStruct cr) throws org.apache.thrift.TException + { + find_columns_with_stats_args args = new find_columns_with_stats_args(); + args.setCr(cr); + sendBase("find_columns_with_stats", args); + } + + public List<String> recv_find_columns_with_stats() throws org.apache.thrift.TException + { + find_columns_with_stats_result result = new find_columns_with_stats_result(); + receiveBase(result, "find_columns_with_stats"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "find_columns_with_stats failed: unknown result"); + } + + public void mark_cleaned(CompactionInfoStruct cr) throws MetaException, org.apache.thrift.TException + { + send_mark_cleaned(cr); + recv_mark_cleaned(); + } + + public void send_mark_cleaned(CompactionInfoStruct cr) throws org.apache.thrift.TException + { + mark_cleaned_args args = new mark_cleaned_args(); + args.setCr(cr); + sendBase("mark_cleaned", args); + } + + public void recv_mark_cleaned() throws MetaException, org.apache.thrift.TException + { + mark_cleaned_result result = new mark_cleaned_result(); + receiveBase(result, "mark_cleaned"); + if (result.o1 != null) { + throw result.o1; + } + return; + } + + public void mark_compacted(CompactionInfoStruct cr) throws MetaException, org.apache.thrift.TException + { + send_mark_compacted(cr); + recv_mark_compacted(); + } + + public void 
send_mark_compacted(CompactionInfoStruct cr) throws org.apache.thrift.TException + { + mark_compacted_args args = new mark_compacted_args(); + args.setCr(cr); + sendBase("mark_compacted", args); + } + + public void recv_mark_compacted() throws MetaException, org.apache.thrift.TException + { + mark_compacted_result result = new mark_compacted_result(); + receiveBase(result, "mark_compacted"); + if (result.o1 != null) { + throw result.o1; + } + return; + } + + public void mark_failed(CompactionInfoStruct cr) throws MetaException, org.apache.thrift.TException + { + send_mark_failed(cr); + recv_mark_failed(); + } + + public void send_mark_failed(CompactionInfoStruct cr) throws org.apache.thrift.TException + { + mark_failed_args args = new mark_failed_args(); + args.setCr(cr); + sendBase("mark_failed", args); + } + + public void recv_mark_failed() throws MetaException, org.apache.thrift.TException + { + mark_failed_result result = new mark_failed_result(); + receiveBase(result, "mark_failed"); + if (result.o1 != null) { + throw result.o1; + } + return; + } + + public void set_hadoop_jobid(String jobId, long cq_id) throws org.apache.thrift.TException + { + send_set_hadoop_jobid(jobId, cq_id); + recv_set_hadoop_jobid(); + } + + public void send_set_hadoop_jobid(String jobId, long cq_id) throws org.apache.thrift.TException + { + set_hadoop_jobid_args args = new set_hadoop_jobid_args(); + args.setJobId(jobId); + args.setCq_id(cq_id); + sendBase("set_hadoop_jobid", args); + } + + public void recv_set_hadoop_jobid() throws org.apache.thrift.TException + { + set_hadoop_jobid_result result = new set_hadoop_jobid_result(); + receiveBase(result, "set_hadoop_jobid"); + return; + } + public NotificationEventResponse get_next_notification(NotificationEventRequest rqst) throws org.apache.thrift.TException { send_get_next_notification(rqst); @@ -12921,6 +13109,236 @@ import org.slf4j.LoggerFactory; } } + public void find_next_compact(String workerId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + find_next_compact_call method_call = new find_next_compact_call(workerId, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class find_next_compact_call extends org.apache.thrift.async.TAsyncMethodCall { + private String workerId; + public find_next_compact_call(String workerId, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.workerId = workerId; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("find_next_compact", org.apache.thrift.protocol.TMessageType.CALL, 0)); + find_next_compact_args args = new find_next_compact_args(); + args.setWorkerId(workerId); + args.write(prot); + prot.writeMessageEnd(); + } + + public OptionalCompactionInfoStruct getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new 
IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_find_next_compact(); + } + } + + public void update_compactor_state(CompactionInfoStruct cr, long txn_id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + update_compactor_state_call method_call = new update_compactor_state_call(cr, txn_id, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_compactor_state_call extends org.apache.thrift.async.TAsyncMethodCall { + private CompactionInfoStruct cr; + private long txn_id; + public update_compactor_state_call(CompactionInfoStruct cr, long txn_id, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cr = cr; + this.txn_id = txn_id; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("update_compactor_state", org.apache.thrift.protocol.TMessageType.CALL, 0)); + update_compactor_state_args args = new update_compactor_state_args(); + args.setCr(cr); + args.setTxn_id(txn_id); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_update_compactor_state(); + } + } + + public void find_columns_with_stats(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + find_columns_with_stats_call method_call = new find_columns_with_stats_call(cr, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class find_columns_with_stats_call extends org.apache.thrift.async.TAsyncMethodCall { + private CompactionInfoStruct cr; + public find_columns_with_stats_call(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cr = cr; + } + + public void 
write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("find_columns_with_stats", org.apache.thrift.protocol.TMessageType.CALL, 0)); + find_columns_with_stats_args args = new find_columns_with_stats_args(); + args.setCr(cr); + args.write(prot); + prot.writeMessageEnd(); + } + + public List<String> getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_find_columns_with_stats(); + } + } + + public void mark_cleaned(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + mark_cleaned_call method_call = new mark_cleaned_call(cr, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_cleaned_call extends org.apache.thrift.async.TAsyncMethodCall { + private CompactionInfoStruct cr; + public mark_cleaned_call(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cr = cr; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mark_cleaned", org.apache.thrift.protocol.TMessageType.CALL, 0)); + mark_cleaned_args args = new mark_cleaned_args(); + args.setCr(cr); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_mark_cleaned(); + } + } + + public void mark_compacted(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + mark_compacted_call method_call = new mark_compacted_call(cr, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_compacted_call extends org.apache.thrift.async.TAsyncMethodCall { + private CompactionInfoStruct cr; + public mark_compacted_call(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback 
resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cr = cr; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mark_compacted", org.apache.thrift.protocol.TMessageType.CALL, 0)); + mark_compacted_args args = new mark_compacted_args(); + args.setCr(cr); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_mark_compacted(); + } + } + + public void mark_failed(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + mark_failed_call method_call = new mark_failed_call(cr, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_failed_call extends org.apache.thrift.async.TAsyncMethodCall { + private CompactionInfoStruct cr; + public mark_failed_call(CompactionInfoStruct cr, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.cr = cr; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("mark_failed", org.apache.thrift.protocol.TMessageType.CALL, 0)); + mark_failed_args args = new mark_failed_args(); + args.setCr(cr); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_mark_failed(); + } + } + + public void set_hadoop_jobid(String jobId, long cq_id, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + set_hadoop_jobid_call method_call = new set_hadoop_jobid_call(jobId, cq_id, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public 
@org.apache.hadoop.classification.InterfaceStability.Stable public static class set_hadoop_jobid_call extends org.apache.thrift.async.TAsyncMethodCall { + private String jobId; + private long cq_id; + public set_hadoop_jobid_call(String jobId, long cq_id, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.jobId = jobId; + this.cq_id = cq_id; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("set_hadoop_jobid", org.apache.thrift.protocol.TMessageType.CALL, 0)); + set_hadoop_jobid_args args = new set_hadoop_jobid_args(); + args.setJobId(jobId); + args.setCq_id(cq_id); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_set_hadoop_jobid(); + } + } + public void get_next_notification(NotificationEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_next_notification_call method_call = new get_next_notification_call(rqst, resultHandler, this, ___protocolFactory, ___transport); @@ -14671,6 +15089,13 @@ import org.slf4j.LoggerFactory; processMap.put("compact2", new compact2()); processMap.put("show_compact", new show_compact()); processMap.put("add_dynamic_partitions", new add_dynamic_partitions()); + processMap.put("find_next_compact", new find_next_compact()); + processMap.put("update_compactor_state", new update_compactor_state()); + processMap.put("find_columns_with_stats", new find_columns_with_stats()); + processMap.put("mark_cleaned", new mark_cleaned()); + processMap.put("mark_compacted", new mark_compacted()); + processMap.put("mark_failed", new mark_failed()); + processMap.put("set_hadoop_jobid", new set_hadoop_jobid()); processMap.put("get_next_notification", new get_next_notification()); processMap.put("get_current_notificationEventId", new get_current_notificationEventId()); processMap.put("get_notification_events_count", new get_notification_events_count()); @@ -19016,6 +19441,162 @@ import org.slf4j.LoggerFactory; } } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class find_next_compact<I extends Iface> extends org.apache.thrift.ProcessFunction<I, find_next_compact_args> { + public find_next_compact() { + super("find_next_compact"); + } + + public find_next_compact_args getEmptyArgsInstance() { + return new find_next_compact_args(); + } + + protected boolean isOneway() { + return false; + } + + public find_next_compact_result getResult(I iface, find_next_compact_args args) throws org.apache.thrift.TException { + find_next_compact_result result = new find_next_compact_result(); + try { + result.success = 
iface.find_next_compact(args.workerId); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_compactor_state<I extends Iface> extends org.apache.thrift.ProcessFunction<I, update_compactor_state_args> { + public update_compactor_state() { + super("update_compactor_state"); + } + + public update_compactor_state_args getEmptyArgsInstance() { + return new update_compactor_state_args(); + } + + protected boolean isOneway() { + return false; + } + + public update_compactor_state_result getResult(I iface, update_compactor_state_args args) throws org.apache.thrift.TException { + update_compactor_state_result result = new update_compactor_state_result(); + iface.update_compactor_state(args.cr, args.txn_id); + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class find_columns_with_stats<I extends Iface> extends org.apache.thrift.ProcessFunction<I, find_columns_with_stats_args> { + public find_columns_with_stats() { + super("find_columns_with_stats"); + } + + public find_columns_with_stats_args getEmptyArgsInstance() { + return new find_columns_with_stats_args(); + } + + protected boolean isOneway() { + return false; + } + + public find_columns_with_stats_result getResult(I iface, find_columns_with_stats_args args) throws org.apache.thrift.TException { + find_columns_with_stats_result result = new find_columns_with_stats_result(); + result.success = iface.find_columns_with_stats(args.cr); + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_cleaned<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mark_cleaned_args> { + public mark_cleaned() { + super("mark_cleaned"); + } + + public mark_cleaned_args getEmptyArgsInstance() { + return new mark_cleaned_args(); + } + + protected boolean isOneway() { + return false; + } + + public mark_cleaned_result getResult(I iface, mark_cleaned_args args) throws org.apache.thrift.TException { + mark_cleaned_result result = new mark_cleaned_result(); + try { + iface.mark_cleaned(args.cr); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_compacted<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mark_compacted_args> { + public mark_compacted() { + super("mark_compacted"); + } + + public mark_compacted_args getEmptyArgsInstance() { + return new mark_compacted_args(); + } + + protected boolean isOneway() { + return false; + } + + public mark_compacted_result getResult(I iface, mark_compacted_args args) throws org.apache.thrift.TException { + mark_compacted_result result = new mark_compacted_result(); + try { + iface.mark_compacted(args.cr); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_failed<I extends Iface> extends org.apache.thrift.ProcessFunction<I, mark_failed_args> { + public mark_failed() { + super("mark_failed"); + } + + public mark_failed_args getEmptyArgsInstance() { + return new 
mark_failed_args(); + } + + protected boolean isOneway() { + return false; + } + + public mark_failed_result getResult(I iface, mark_failed_args args) throws org.apache.thrift.TException { + mark_failed_result result = new mark_failed_result(); + try { + iface.mark_failed(args.cr); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class set_hadoop_jobid<I extends Iface> extends org.apache.thrift.ProcessFunction<I, set_hadoop_jobid_args> { + public set_hadoop_jobid() { + super("set_hadoop_jobid"); + } + + public set_hadoop_jobid_args getEmptyArgsInstance() { + return new set_hadoop_jobid_args(); + } + + protected boolean isOneway() { + return false; + } + + public set_hadoop_jobid_result getResult(I iface, set_hadoop_jobid_args args) throws org.apache.thrift.TException { + set_hadoop_jobid_result result = new set_hadoop_jobid_result(); + iface.set_hadoop_jobid(args.jobId, args.cq_id); + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_next_notification<I extends Iface> extends org.apache.thrift.ProcessFunction<I, get_next_notification_args> { public get_next_notification() { super("get_next_notification"); @@ -20418,6 +20999,13 @@ import org.slf4j.LoggerFactory; processMap.put("compact2", new compact2()); processMap.put("show_compact", new show_compact()); processMap.put("add_dynamic_partitions", new add_dynamic_partitions()); + processMap.put("find_next_compact", new find_next_compact()); + processMap.put("update_compactor_state", new update_compactor_state()); + processMap.put("find_columns_with_stats", new find_columns_with_stats()); + processMap.put("mark_cleaned", new mark_cleaned()); + processMap.put("mark_compacted", new mark_compacted()); + processMap.put("mark_failed", new mark_failed()); + processMap.put("set_hadoop_jobid", new set_hadoop_jobid()); processMap.put("get_next_notification", new get_next_notification()); processMap.put("get_current_notificationEventId", new get_current_notificationEventId()); processMap.put("get_notification_events_count", new get_notification_events_count()); @@ -30685,6 +31273,382 @@ import org.slf4j.LoggerFactory; } } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class find_next_compact<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, find_next_compact_args, OptionalCompactionInfoStruct> { + public find_next_compact() { + super("find_next_compact"); + } + + public find_next_compact_args getEmptyArgsInstance() { + return new find_next_compact_args(); + } + + public AsyncMethodCallback<OptionalCompactionInfoStruct> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<OptionalCompactionInfoStruct>() { + public void onComplete(OptionalCompactionInfoStruct o) { + find_next_compact_result result = new find_next_compact_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + 
org.apache.thrift.TBase msg; + find_next_compact_result result = new find_next_compact_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, find_next_compact_args args, org.apache.thrift.async.AsyncMethodCallback<OptionalCompactionInfoStruct> resultHandler) throws TException { + iface.find_next_compact(args.workerId,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class update_compactor_state<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, update_compactor_state_args, Void> { + public update_compactor_state() { + super("update_compactor_state"); + } + + public update_compactor_state_args getEmptyArgsInstance() { + return new update_compactor_state_args(); + } + + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + update_compactor_state_result result = new update_compactor_state_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + update_compactor_state_result result = new update_compactor_state_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, update_compactor_state_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.update_compactor_state(args.cr, args.txn_id,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class find_columns_with_stats<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, find_columns_with_stats_args, List<String>> { + public find_columns_with_stats() { + super("find_columns_with_stats"); + } + + public find_columns_with_stats_args getEmptyArgsInstance() { + return new find_columns_with_stats_args(); + } + + public AsyncMethodCallback<List<String>> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<List<String>>() { + public void onComplete(List<String> o) { + 
find_columns_with_stats_result result = new find_columns_with_stats_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + find_columns_with_stats_result result = new find_columns_with_stats_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, find_columns_with_stats_args args, org.apache.thrift.async.AsyncMethodCallback<List<String>> resultHandler) throws TException { + iface.find_columns_with_stats(args.cr,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_cleaned<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mark_cleaned_args, Void> { + public mark_cleaned() { + super("mark_cleaned"); + } + + public mark_cleaned_args getEmptyArgsInstance() { + return new mark_cleaned_args(); + } + + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + mark_cleaned_result result = new mark_cleaned_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + mark_cleaned_result result = new mark_cleaned_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, mark_cleaned_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.mark_cleaned(args.cr,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_compacted<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mark_compacted_args, Void> { + public mark_compacted() { + super("mark_compacted"); + } + + public mark_compacted_args getEmptyArgsInstance() { + return new mark_compacted_args(); + } + + public AsyncMethodCallback<Void> 
getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + mark_compacted_result result = new mark_compacted_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + mark_compacted_result result = new mark_compacted_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, mark_compacted_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.mark_compacted(args.cr,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class mark_failed<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, mark_failed_args, Void> { + public mark_failed() { + super("mark_failed"); + } + + public mark_failed_args getEmptyArgsInstance() { + return new mark_failed_args(); + } + + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + mark_failed_result result = new mark_failed_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + mark_failed_result result = new mark_failed_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, mark_failed_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.mark_failed(args.cr,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class set_hadoop_jobid<I extends AsyncIface> extends 
org.apache.thrift.AsyncProcessFunction<I, set_hadoop_jobid_args, Void> { + public set_hadoop_jobid() { + super("set_hadoop_jobid"); + } + + public set_hadoop_jobid_args getEmptyArgsInstance() { + return new set_hadoop_jobid_args(); + } + + public AsyncMethodCallback<Void> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback<Void>() { + public void onComplete(Void o) { + set_hadoop_jobid_result result = new set_hadoop_jobid_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + set_hadoop_jobid_result result = new set_hadoop_jobid_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, set_hadoop_jobid_args args, org.apache.thrift.async.AsyncMethodCallback<Void> resultHandler) throws TException { + iface.set_hadoop_jobid(args.jobId, args.cq_id,resultHandler); + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_next_notification<I extends AsyncIface> extends org.apache.thrift.AsyncProcessFunction<I, get_next_notification_args, NotificationEventResponse> { public get_next_notification() { super("get_next_notification"); @@ -43489,13 +44453,13 @@ import org.slf4j.LoggerFactory; case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1008 = iprot.readListBegin(); - struct.success = new ArrayList<String>(_list1008.size); - String _elem1009; - for (int _i1010 = 0; _i1010 < _list1008.size; ++_i1010) + org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); + struct.success = new ArrayList<String>(_list1016.size); + String _elem1017; + for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) { - _elem1009 = iprot.readString(); - struct.success.add(_elem1009); + _elem1017 = iprot.readString(); + struct.success.add(_elem1017); } iprot.readListEnd(); } @@ -43530,9 +44494,9 @@ import org.slf4j.LoggerFactory; oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1011 : struct.success) + for (String _iter1019 : struct.success) { - oprot.writeString(_iter1011); + oprot.writeString(_iter1019); } oprot.writeListEnd(); } @@ -43571,9 +44535,9 @@ import org.slf4j.LoggerFactory; if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1012 : struct.success) + for (String _iter1020 : struct.success) { - oprot.writeString(_iter1012); + oprot.writeString(_iter1020); } } } @@ -43588,13 +44552,13 @@ import org.slf4j.LoggerFactory; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList 
_list1013 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList<String>(_list1013.size); - String _elem1014; - for (int _i1015 = 0; _i1015 < _list1013.size; ++_i1015) + org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList<String>(_list1021.size); + String _elem1022; + for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) { - _elem1014 = iprot.readString(); - struct.success.add(_elem1014); + _elem1022 = iprot.readString(); + struct.success.add(_elem1022); } } struct.setSuccessIsSet(true); @@ -44248,13 +45212,13 @@ import org.slf4j.LoggerFactory; case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1016 = iprot.readListBegin(); - struct.success = new ArrayList<String>(_list1016.size); - String _elem1017; - for (int _i1018 = 0; _i1018 < _list1016.size; ++_i1018) + org.apache.thrift.protocol.TList _list1024 = iprot.readListBegin(); + struct.success = new ArrayList<String>(_list1024.size); + String _elem1025; + for (int _i1026 = 0; _i1026 < _list1024.size; ++_i1026) { - _elem1017 = iprot.readString(); - struct.success.add(_elem1017); + _elem1025 = iprot.readString(); + struct.success.add(_elem1025); } iprot.readListEnd(); } @@ -44289,9 +45253,9 @@ import org.slf4j.LoggerFactory; oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1019 : struct.success) + for (String _iter1027 : struct.success) { - oprot.writeString(_iter1019); + oprot.writeString(_iter1027); } oprot.writeListEnd(); } @@ -44330,9 +45294,9 @@ import org.slf4j.LoggerFactory; if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1020 : struct.success) + for (String _iter1028 : struct.success) { - oprot.writeString(_iter1020); + oprot.writeString(_iter1028); } } } @@ -44347,13 +45311,13 @@ import org.slf4j.LoggerFactory; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1021 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList<String>(_list1021.size); - String _elem1022; - for (int _i1023 = 0; _i1023 < _list1021.size; ++_i1023) + org.apache.thrift.protocol.TList _list1029 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList<String>(_list1029.size); + String _elem1030; + for (int _i1031 = 0; _i1031 < _list1029.size; ++_i1031) { - _elem1022 = iprot.readString(); - struct.success.add(_elem1022); + _elem1030 = iprot.readString(); + struct.success.add(_elem1030); } } struct.setSuccessIsSet(true); @@ -48960,16 +49924,16 @@ import org.slf4j.LoggerFactory; case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1024 = iprot.readMapBegin(); - struct.success = new HashMap<String,Type>(2*_map1024.size); - String _key1025; - Type _val1026; - for (int _i1027 = 0; _i1027 < _map1024.size; ++_i1027) + org.apache.thrift.protocol.TMap _map1032 = iprot.readMapBegin(); + struct.success = new HashMap<String,Type>(2*_map1032.size); + String _key1033; + Type _val1034; + for (int _i1035 = 0; _i1035 < _map1032.size; ++_i1035) { - _key1025 = 
iprot.readString(); - _val1026 = new Type(); - _val1026.read(iprot); - struct.success.put(_key1025, _val1026); + _key1033 = iprot.readString(); + _val1034 = new Type(); + _val1034.read(iprot); + struct.success.put(_key1033, _val1034); } iprot.readMapEnd(); } @@ -49004,10 +49968,10 @@ import org.slf4j.LoggerFactory; oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry<String, Type> _iter1028 : struct.success.entrySet()) + for (Map.Entry<String, Type> _iter1036 : struct.success.entrySet()) { - oprot.writeString(_iter1028.getKey()); - _iter1028.getValue().write(oprot); + oprot.writeString(_iter1036.getKey()); + _iter1036.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -49046,10 +50010,10 @@ import org.slf4j.LoggerFactory; if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry<String, Type> _iter1029 : struct.success.entrySet()) + for (Map.Entry<String, Type> _iter1037 : struct.success.entrySet()) { - oprot.writeString(_iter1029.getKey()); - _iter1029.getValue().write(oprot); + oprot.writeString(_iter1037.getKey()); + _iter1037.getValue().write(oprot); } } } @@ -49064,16 +50028,16 @@ import org.slf4j.LoggerFactory; BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1030 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap<String,Type>(2*_map1030.size); - String _key1031; - Type _val1032; - for (int _i1033 = 0; _i1033 < _map1030.size; ++_i1033) + org.apache.thrift.protocol.TMap _map1038 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap<String,Type>(2*_map1038.size); + String _key1039; + Type _val1040; + for (int _i1041 = 0; _i1041 < _map1038.size; ++_i1041) { - _key1031 = iprot.readString(); - _val1032 = new Type(); - _val1032.read(iprot); - struct.success.put(_key1031, _val1032); + _key1039 = iprot.readString(); + _val1040 = new Type(); + _val1040.read(iprot); + struct.success.put(_key1039, _val1040); } } struct.setSuccessIsSet(true); @@ -50108,14 +51072,14 @@ import org.slf4j.LoggerFactory; case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1034 = iprot.readListBegin(); - struct.success = new ArrayList<FieldSchema>(_list1034.size); - FieldSchema _elem1035; - for (int _i1036 = 0; _i1036 < _list1034.size; ++_i1036) + org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin(); + struct.success = new ArrayList<FieldSchema>(_list1042.size); + FieldSchema _elem1043; + for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044) { - _elem1035 = new FieldSchema(); - _elem1035.read(iprot); - struct.success.add(_elem1035); + _elem1043 = new FieldSchema(); + _elem1043.read(iprot); + struct.success.add(_elem1043); } iprot.readListEnd(); } @@ -50168,9 +51132,9 @@ import org.slf4j.LoggerFactory; oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1037 : struct.success) + for (FieldSchema _iter1045 : struct.success) { - _iter1037.write(oprot); + _iter1045.write(oprot); } oprot.writeListEnd(); } @@ -50225,9 
+51189,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter1038 : struct.success)
+          for (FieldSchema _iter1046 : struct.success)
           {
-            _iter1038.write(oprot);
+            _iter1046.write(oprot);
           }
         }
       }
@@ -50248,14 +51212,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list1039 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list1039.size);
-          FieldSchema _elem1040;
-          for (int _i1041 = 0; _i1041 < _list1039.size; ++_i1041)
+          org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list1047.size);
+          FieldSchema _elem1048;
+          for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049)
           {
-            _elem1040 = new FieldSchema();
-            _elem1040.read(iprot);
-            struct.success.add(_elem1040);
+            _elem1048 = new FieldSchema();
+            _elem1048.read(iprot);
+            struct.success.add(_elem1048);
           }
         }
         struct.setSuccessIsSet(true);
@@ -51409,14 +52373,14 @@ import org.slf4j.LoggerFactory;
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1042 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list1042.size);
-                FieldSchema _elem1043;
-                for (int _i1044 = 0; _i1044 < _list1042.size; ++_i1044)
+                org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list1050.size);
+                FieldSchema _elem1051;
+                for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052)
                 {
-                  _elem1043 = new FieldSchema();
-                  _elem1043.read(iprot);
-                  struct.success.add(_elem1043);
+                  _elem1051 = new FieldSchema();
+                  _elem1051.read(iprot);
+                  struct.success.add(_elem1051);
                 }
                 iprot.readListEnd();
               }
@@ -51469,9 +52433,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter1045 : struct.success)
+          for (FieldSchema _iter1053 : struct.success)
           {
-            _iter1045.write(oprot);
+            _iter1053.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -51526,9 +52490,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter1046 : struct.success)
+          for (FieldSchema _iter1054 : struct.success)
           {
-            _iter1046.write(oprot);
+            _iter1054.write(oprot);
           }
         }
       }
@@ -51549,14 +52513,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list1047 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list1047.size);
-          FieldSchema _elem1048;
-          for (int _i1049 = 0; _i1049 < _list1047.size; ++_i1049)
+          org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list1055.size);
+          FieldSchema _elem1056;
+          for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
           {
-            _elem1048 = new FieldSchema();
-            _elem1048.read(iprot);
-            struct.success.add(_elem1048);
+            _elem1056 = new FieldSchema();
+            _elem1056.read(iprot);
+            struct.success.add(_elem1056);
           }
         }
         struct.setSuccessIsSet(true);
@@ -52601,14 +53565,14 @@ import org.slf4j.LoggerFactory;
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1050 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list1050.size);
-                FieldSchema _elem1051;
-                for (int _i1052 = 0; _i1052 < _list1050.size; ++_i1052)
+                org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list1058.size);
+                FieldSchema _elem1059;
+                for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
                 {
-                  _elem1051 = new FieldSchema();
-                  _elem1051.read(iprot);
-                  struct.success.add(_elem1051);
+                  _elem1059 = new FieldSchema();
+                  _elem1059.read(iprot);
+                  struct.success.add(_elem1059);
                 }
                 iprot.readListEnd();
               }
@@ -52661,9 +53625,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter1053 : struct.success)
+          for (FieldSchema _iter1061 : struct.success)
           {
-            _iter1053.write(oprot);
+            _iter1061.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -52718,9 +53682,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter1054 : struct.success)
+          for (FieldSchema _iter1062 : struct.success)
          {
-            _iter1054.write(oprot);
+            _iter1062.write(oprot);
          }
        }
      }
@@ -52741,14 +53705,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list1055 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list1055.size);
-          FieldSchema _elem1056;
-          for (int _i1057 = 0; _i1057 < _list1055.size; ++_i1057)
+          org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list1063.size);
+          FieldSchema _elem1064;
+          for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
           {
-            _elem1056 = new FieldSchema();
-            _elem1056.read(iprot);
-            struct.success.add(_elem1056);
+            _elem1064 = new FieldSchema();
+            _elem1064.read(iprot);
+            struct.success.add(_elem1064);
           }
         }
         struct.setSuccessIsSet(true);
@@ -53902,14 +54866,14 @@ import org.slf4j.LoggerFactory;
           case 0: // SUCCESS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1058 = iprot.readListBegin();
-                struct.success = new ArrayList<FieldSchema>(_list1058.size);
-                FieldSchema _elem1059;
-                for (int _i1060 = 0; _i1060 < _list1058.size; ++_i1060)
+                org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
+                struct.success = new ArrayList<FieldSchema>(_list1066.size);
+                FieldSchema _elem1067;
+                for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
                 {
-                  _elem1059 = new FieldSchema();
-                  _elem1059.read(iprot);
-                  struct.success.add(_elem1059);
+                  _elem1067 = new FieldSchema();
+                  _elem1067.read(iprot);
+                  struct.success.add(_elem1067);
                 }
                 iprot.readListEnd();
               }
@@ -53962,9 +54926,9 @@ import org.slf4j.LoggerFactory;
         oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size()));
-          for (FieldSchema _iter1061 : struct.success)
+          for (FieldSchema _iter1069 : struct.success)
           {
-            _iter1061.write(oprot);
+            _iter1069.write(oprot);
           }
           oprot.writeListEnd();
         }
@@ -54019,9 +54983,9 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetSuccess()) {
         {
           oprot.writeI32(struct.success.size());
-          for (FieldSchema _iter1062 : struct.success)
+          for (FieldSchema _iter1070 : struct.success)
          {
-            _iter1062.write(oprot);
+            _iter1070.write(oprot);
          }
        }
      }
@@ -54042,14 +55006,14 @@ import org.slf4j.LoggerFactory;
       BitSet incoming = iprot.readBitSet(4);
       if (incoming.get(0)) {
         {
-          org.apache.thrift.protocol.TList _list1063 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.success = new ArrayList<FieldSchema>(_list1063.size);
-          FieldSchema _elem1064;
-          for (int _i1065 = 0; _i1065 < _list1063.size; ++_i1065)
+          org.apache.thrift.protocol.TList _list1071 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.success = new ArrayList<FieldSchema>(_list1071.size);
+          FieldSchema _elem1072;
+          for (int _i1073 = 0; _i1073 < _list1071.size; ++_i1073)
           {
-            _elem1064 = new FieldSchema();
-            _elem1064.read(iprot);
-            struct.success.add(_elem1064);
+            _elem1072 = new FieldSchema();
+            _elem1072.read(iprot);
+            struct.success.add(_elem1072);
           }
         }
         struct.setSuccessIsSet(true);
@@ -57178,14 +58142,14 @@ import org.slf4j.LoggerFactory;
           case 2: // PRIMARY_KEYS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1066 = iprot.readListBegin();
-                struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1066.size);
-                SQLPrimaryKey _elem1067;
-                for (int _i1068 = 0; _i1068 < _list1066.size; ++_i1068)
+                org.apache.thrift.protocol.TList _list1074 = iprot.readListBegin();
+                struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1074.size);
+                SQLPrimaryKey _elem1075;
+                for (int _i1076 = 0; _i1076 < _list1074.size; ++_i1076)
                 {
-                  _elem1067 = new SQLPrimaryKey();
-                  _elem1067.read(iprot);
-                  struct.primaryKeys.add(_elem1067);
+                  _elem1075 = new SQLPrimaryKey();
+                  _elem1075.read(iprot);
+                  struct.primaryKeys.add(_elem1075);
                 }
                 iprot.readListEnd();
               }
@@ -57197,14 +58161,14 @@ import org.slf4j.LoggerFactory;
           case 3: // FOREIGN_KEYS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1069 = iprot.readListBegin();
-                struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1069.size);
-                SQLForeignKey _elem1070;
-                for (int _i1071 = 0; _i1071 < _list1069.size; ++_i1071)
+                org.apache.thrift.protocol.TList _list1077 = iprot.readListBegin();
+                struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1077.size);
+                SQLForeignKey _elem1078;
+                for (int _i1079 = 0; _i1079 < _list1077.size; ++_i1079)
                 {
-                  _elem1070 = new SQLForeignKey();
-                  _elem1070.read(iprot);
-                  struct.foreignKeys.add(_elem1070);
+                  _elem1078 = new SQLForeignKey();
+                  _elem1078.read(iprot);
+                  struct.foreignKeys.add(_elem1078);
                 }
                 iprot.readListEnd();
               }
@@ -57216,14 +58180,14 @@ import org.slf4j.LoggerFactory;
           case 4: // UNIQUE_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list1072 = iprot.readListBegin();
-                struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1072.size);
-                SQLUniqueConstraint _elem1073;
-                for (int _i1074 = 0; _i1074 < _list1072.size; ++_i1074)
+                org.apache.thrift.protocol.TList _list1080 = iprot.readListBegin();
+                struct.uniqueConstraints = new ArrayList<SQLUniqueConstraint>(_list1080.size);
+                SQLUniqueConstraint _elem1081;
+                for (int _i1082 = 0; _i1082 < _list1080.size; ++_i1082)
                 {
-                  _elem1073 = new SQLUniqueConstraint();
-                  _elem1073.read(iprot);
-                  struct.uniqueConstraints.add(_elem1073);
+                  _elem1081 = new SQLUniqueConstraint();
+                  _elem1081.read(iprot);
+                  struct.uniqueConstraints.add(_elem1081);
                 }
                 iprot.readListEnd();
               }
@@ -57235,14 +58199,14 @@ import org.slf4j.LoggerFactory;
           case 5: // NOT_NULL_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list1075 = iprot.readListBegin();
-                struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1075.size);
-                SQLNotNullConstraint _elem1076;
-                for (int _i1077 = 0; _i1077 < _list1075.size; ++_i1077)
+                org.apache.thrift.protocol.TList _list1083 = iprot.readListBegin();
+                struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list1083.size);
+                SQLNotNullConstraint _elem1084;
+                for (int _i1085 = 0; _i1085 < _list1083.size; ++_i1085)
                {
-                  _elem1076 = new SQLNotNullConstraint();
-                  _elem1076.read(iprot);
-                  struct.notNullConstraints.add(_elem1076);
+                  _elem1084 = new SQLNotNullConstraint();
+                  _elem1084.read(iprot);
+                  struct.notNullConstraints.add(_elem1084);
                }
                iprot.readListEnd();
              }
@@ -57254,14 +58218,14 @@ import org.slf4j.LoggerFactory;
           case 6: // DEFAULT_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list1078 = iprot.readListBegin();
-                struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1078.size);
-                SQLDefaultConstraint _elem1079;
-                for (int _i1080 = 0; _i1080 < _list1078.size; ++_i1080)
+                org.apache.thrift.protocol.TList _list1086 = iprot.readListBegin();
+                struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list1086.size);
+                SQLDefaultConstraint _elem1087;
+                for (int _i1088 = 0; _i1088 < _list1086.size; ++_i1088)
                {
-                  _elem1079 = new SQLDefaultConstraint();
-                  _elem1079.read(iprot);
-                  struct.defaultConstraints.add(_elem1079);
+                  _elem1087 = new SQLDefaultConstraint();
+                  _elem1087.read(iprot);
+                  struct.defaultConstraints.add(_elem1087);
                }
                iprot.readListEnd();
              }
@@ -57273,14 +58237,14 @@ import org.slf4j.LoggerFactory;
           case 7: // CHECK_CONSTRAINTS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list1081 = iprot.readListBegin();
-                struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1081.size);
-                SQLCheckConstraint _elem1082;
-                for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083)
+                org.apache.thrift.protocol.TList _list1089 = iprot.readListBegin();
+                struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list1089.size);
+                SQLCheckConstraint _elem1090;
+                for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091)
                {
-                  _elem1082 = new SQLCheckConstraint();
-                  _elem1082.read(iprot);
-                  struct.checkConstraints.add(_elem1082);
+                  _elem1090 = new SQLCheckConstraint();
+                  _elem1090.read(iprot);
+                  struct.checkConstraints.add(_elem1090);
                }
                iprot.readListEnd();
              }
@@ -57311,9 +58275,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
       {
         oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
-        for (SQLPrimaryKey _iter1084 : struct.primaryKeys)
+        for (SQLPrimaryKey _iter1092 : struct.primaryKeys)
        {
-          _iter1084.write(oprot);
+          _iter1092.write(oprot);
        }
        oprot.writeListEnd();
      }
@@ -57323,9 +58287,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
       {
         oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
-        for (SQLForeignKey _iter1085 : struct.foreignKeys)
+        for (SQLForeignKey _iter1093 : struct.foreignKeys)
        {
-          _iter1085.write(oprot);
+          _iter1093.write(oprot);
        }
        oprot.writeListEnd();
      }
@@ -57335,9 +58299,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC);
       {
         oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size()));
-        for (SQLUniqueConstraint _iter1086 : struct.uniqueConstraints)
+        for (SQLUniqueConstraint _iter1094 : struct.uniqueConstraints)
        {
-          _iter1086.write(oprot);
+          _iter1094.write(oprot);
        }
        oprot.writeListEnd();
      }
@@ -57347,9 +58311,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
       {
         oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
-        for (SQLNotNullConstraint _iter1087 : struct.notNullConstraints)
+        for (SQLNotNullConstraint _iter1095 : struct.notNullConstraints)
        {
-          _iter1087.write(oprot);
+          _iter1095.write(oprot);
        }
        oprot.writeListEnd();
      }
@@ -57359,9 +58323,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
       {
         oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
-        for (SQLDefaultConstraint _iter1088 : struct.defaultConstraints)
+        for (SQLDefaultConstraint _iter1096 : struct.defaultConstraints)
        {
-          _iter1088.write(oprot);
+          _iter1096.write(oprot);
        }
        oprot.writeListEnd();
      }
@@ -57371,9 +58335,9 @@ import org.slf4j.LoggerFactory;
       oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
       {
         oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
-        for (SQLCheckConstraint _iter1089 : struct.checkConstraints)
+        for (SQLCheckConstraint _iter1097 : struct.checkConstraints)
        {
-          _iter1089.write(oprot);
+          _iter1097.write(oprot);
        }
        oprot.writeListEnd();
      }
@@ -57425,54 +58389,54 @@ import org.slf4j.LoggerFactory;
       if (struct.isSetPrimaryKeys()) {
        {
          oprot.writeI32(struct.primaryKeys.size());
-          for (SQLPrimaryKey _iter1090 : struct.primaryKeys)
+          for (SQLPrimaryKey _iter1098 : struct.primaryKeys)
          {
-            _iter1090.write(oprot);
+            _iter1098.write(oprot);
          }
        }
      }
       if (struct.isSetForeignKeys()) {
        {
          oprot.writeI32(struct.foreignKeys.size());
-          for (SQLForeignKey _iter1091 : struct.foreignKeys)
+          for (SQLForeignKey _iter1099 : struct.foreignKeys)
          {
-            _iter1091.write(oprot);
+            _iter1099.write(oprot);
          }
        }
      }
       if (struct.isSetUniqueConstraints()) {
        {
          oprot.writeI32(struct.uniqueConstraints.size());
-          for (SQLUniqueConstraint _iter1092 : struct.uniqueConstraints)
+          for (SQLUniqueConstraint _iter1100 : struct.uniqueConstraints)
          {
-            _iter1092.write(oprot);
+            _iter1100.write(oprot);
          }
        }
      }
       if (struct.isSetNotNullConstraints()) {
        {
          oprot.writeI32(struct.notNullConstraints.size());
-          for (SQLNotNullConstraint _iter1093 : struct.notNullConstraints)
+          for (SQLNotNullConstraint _iter1101 : struct.notNullConstraints)
          {
-            _iter1093.write(oprot);
+            _iter1101.write(oprot);
          }
        }
      }
       if (struct.isSetDefaultConstraints()) {
        {
          oprot.writeI32(struct.defaultConstraints.size());
-          for (SQLDefaultConstraint _iter1094 : struct.defaultConstraints)
+          for (SQLDefaultConstraint _iter1102 : struct.defaultConstraints)
          {
-            _iter1094.write(oprot);
+            _iter1102.write(oprot);
          }
        }
      }
       if (struct.isSetCheckConstraints()) {
        {
          oprot.writeI32(struct.checkConstraints.size());
-          for (SQLCheckConstraint _iter1095 : struct.checkConstraints)
+          for (SQLCheckConstraint _iter1103 : struct.checkConstraints)
          {
-            _iter1095.write(oprot);
+            _iter1103.write(oprot);
          }
        }
      }
@@ -57489,84 +58453,84 @@ import org.slf4j.LoggerFactory;
      }
       if (incoming.get(1)) {
        {
-          org.apache.thrift.protocol.TList _list1096 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1096.size);
-          SQLPrimaryKey _elem1097;
-          for (int _i1098 = 0; _i1098 < _list1096.size; ++_i1098)
+          org.apache.thrift.protocol.TList _list1104 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list1104.size);
+          SQLPrimaryKey _elem1105;
+          for (int _i1106 = 0; _i1106 < _list1104.size; ++_i1106)
          {
-            _elem1097 = new SQLPrimaryKey();
-            _elem1097.read(iprot);
-            struct.primaryKeys.add(_elem1097);
+            _elem1105 = new SQLPrimaryKey();
+            _elem1105.read(iprot);
+            struct.primaryKeys.add(_elem1105);
          }
        }
        struct.setPrimaryKeysIsSet(true);
      }
       if (incoming.get(2)) {
        {
-          org.apache.thrift.protocol.TList _list1099 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1099.size);
-          SQLForeignKey _elem1100;
-          for (int _i1101 = 0; _i1101 < _list1099.size; ++_i1101)
+          org.apache.thrift.protocol.TList _list1107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.foreignKeys = new ArrayList<SQLForeignKey>(_list1107.size);
+          SQLForeignKey _elem1108;
+          for (int _i1109 = 0; _i1109 < _list1107.size; ++_i1109)
          {
-            _elem1100 = new SQLForeignKey();
-            _elem1100.read(iprot);
-            struct.foreignKeys.add(_elem1100);
+            _elem1108 = new SQLForeignKey();
+            _elem1108.read(iprot);
+            struct.foreignKeys.add(_elem1108);
          }
        }
        struct.setForeignKeysIsSet(true);
      }
       if (incoming.get(3)) {
        {
-          org.apache.thrift.protocol.TList _list1102 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.uniqueConstraints = new ArrayList<SQLUniqueCo
<TRUNCATED>
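
Editor's note (not part of the commit): every hunk above follows the same generated pattern; the only substantive change is the renumbering of the generator's temporaries (_list*, _elem*, _i*, _iter*), which is the usual side effect of regenerating ThriftHiveMetastore.java after the service definition grows. As a reading aid, here is a hand-written sketch of that list (de)serialization pattern. The class and helper names (ThriftListPatternSketch, readStructList, writeStructList) are invented for illustration and do not exist in the Hive codebase.

// Illustrative sketch only: a generic analogue of the generated per-field loops above.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

import org.apache.thrift.TBase;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

public class ThriftListPatternSketch {

  // Standard-scheme read: readListBegin() yields the element count, then each
  // struct element is constructed and populated from the protocol, exactly as the
  // generated _listNNNN / _elemNNNN / _iNNNN loops do for FieldSchema, SQLPrimaryKey, etc.
  static <T extends TBase<?, ?>> List<T> readStructList(TProtocol iprot, Supplier<T> ctor)
      throws TException {
    TList header = iprot.readListBegin();
    List<T> result = new ArrayList<>(header.size);
    for (int i = 0; i < header.size; ++i) {
      T elem = ctor.get();
      elem.read(iprot);
      result.add(elem);
    }
    iprot.readListEnd();
    return result;
  }

  // Standard-scheme write: a TList header carrying the element type and count,
  // followed by each element's own write(), mirrors the generated writeListBegin loops.
  static <T extends TBase<?, ?>> void writeStructList(TProtocol oprot, List<T> values)
      throws TException {
    oprot.writeListBegin(new TList(TType.STRUCT, values.size()));
    for (T elem : values) {
      elem.write(oprot);
    }
    oprot.writeListEnd();
  }
}

The compact "tuple scheme" hunks in the diff differ from this sketch only in that the list header is elided: the writer emits a bare element count with oprot.writeI32(list.size()) and the reader rebuilds the header as new TList(TType.STRUCT, iprot.readI32()).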