Repository: hive
Updated Branches:
  refs/heads/branch-2 15adcd618 -> dec4efd3c


http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 3faf1bb..4c305cb 100644
--- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@ -7798,6 +7798,337 @@ class DropPartitionsRequest:
   def __ne__(self, other):
     return not (self == other)
 
+class PartitionValuesRequest:
+  """
+  Attributes:
+   - dbName
+   - tblName
+   - partitionKeys
+   - applyDistinct
+   - filter
+   - partitionOrder
+   - ascending
+   - maxParts
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'dbName', None, None, ), # 1
+    (2, TType.STRING, 'tblName', None, None, ), # 2
+    (3, TType.LIST, 'partitionKeys', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 3
+    (4, TType.BOOL, 'applyDistinct', None, True, ), # 4
+    (5, TType.STRING, 'filter', None, None, ), # 5
+    (6, TType.LIST, 'partitionOrder', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 6
+    (7, TType.BOOL, 'ascending', None, True, ), # 7
+    (8, TType.I64, 'maxParts', None, -1, ), # 8
+  )
+
+  def __init__(self, dbName=None, tblName=None, partitionKeys=None, applyDistinct=thrift_spec[4][4], filter=None, partitionOrder=None, ascending=thrift_spec[7][4], maxParts=thrift_spec[8][4],):
+    self.dbName = dbName
+    self.tblName = tblName
+    self.partitionKeys = partitionKeys
+    self.applyDistinct = applyDistinct
+    self.filter = filter
+    self.partitionOrder = partitionOrder
+    self.ascending = ascending
+    self.maxParts = maxParts
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.dbName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tblName = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.partitionKeys = []
+          (_etype402, _size399) = iprot.readListBegin()
+          for _i403 in xrange(_size399):
+            _elem404 = FieldSchema()
+            _elem404.read(iprot)
+            self.partitionKeys.append(_elem404)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.BOOL:
+          self.applyDistinct = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      elif fid == 5:
+        if ftype == TType.STRING:
+          self.filter = iprot.readString()
+        else:
+          iprot.skip(ftype)
+      elif fid == 6:
+        if ftype == TType.LIST:
+          self.partitionOrder = []
+          (_etype408, _size405) = iprot.readListBegin()
+          for _i409 in xrange(_size405):
+            _elem410 = FieldSchema()
+            _elem410.read(iprot)
+            self.partitionOrder.append(_elem410)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 7:
+        if ftype == TType.BOOL:
+          self.ascending = iprot.readBool()
+        else:
+          iprot.skip(ftype)
+      elif fid == 8:
+        if ftype == TType.I64:
+          self.maxParts = iprot.readI64()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionValuesRequest')
+    if self.dbName is not None:
+      oprot.writeFieldBegin('dbName', TType.STRING, 1)
+      oprot.writeString(self.dbName)
+      oprot.writeFieldEnd()
+    if self.tblName is not None:
+      oprot.writeFieldBegin('tblName', TType.STRING, 2)
+      oprot.writeString(self.tblName)
+      oprot.writeFieldEnd()
+    if self.partitionKeys is not None:
+      oprot.writeFieldBegin('partitionKeys', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys))
+      for iter411 in self.partitionKeys:
+        iter411.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.applyDistinct is not None:
+      oprot.writeFieldBegin('applyDistinct', TType.BOOL, 4)
+      oprot.writeBool(self.applyDistinct)
+      oprot.writeFieldEnd()
+    if self.filter is not None:
+      oprot.writeFieldBegin('filter', TType.STRING, 5)
+      oprot.writeString(self.filter)
+      oprot.writeFieldEnd()
+    if self.partitionOrder is not None:
+      oprot.writeFieldBegin('partitionOrder', TType.LIST, 6)
+      oprot.writeListBegin(TType.STRUCT, len(self.partitionOrder))
+      for iter412 in self.partitionOrder:
+        iter412.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.ascending is not None:
+      oprot.writeFieldBegin('ascending', TType.BOOL, 7)
+      oprot.writeBool(self.ascending)
+      oprot.writeFieldEnd()
+    if self.maxParts is not None:
+      oprot.writeFieldBegin('maxParts', TType.I64, 8)
+      oprot.writeI64(self.maxParts)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.dbName is None:
+      raise TProtocol.TProtocolException(message='Required field dbName is unset!')
+    if self.tblName is None:
+      raise TProtocol.TProtocolException(message='Required field tblName is unset!')
+    if self.partitionKeys is None:
+      raise TProtocol.TProtocolException(message='Required field partitionKeys is unset!')
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.dbName)
+    value = (value * 31) ^ hash(self.tblName)
+    value = (value * 31) ^ hash(self.partitionKeys)
+    value = (value * 31) ^ hash(self.applyDistinct)
+    value = (value * 31) ^ hash(self.filter)
+    value = (value * 31) ^ hash(self.partitionOrder)
+    value = (value * 31) ^ hash(self.ascending)
+    value = (value * 31) ^ hash(self.maxParts)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class PartitionValuesRow:
+  """
+  Attributes:
+   - row
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'row', (TType.STRING,None), None, ), # 1
+  )
+
+  def __init__(self, row=None,):
+    self.row = row
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.row = []
+          (_etype416, _size413) = iprot.readListBegin()
+          for _i417 in xrange(_size413):
+            _elem418 = iprot.readString()
+            self.row.append(_elem418)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionValuesRow')
+    if self.row is not None:
+      oprot.writeFieldBegin('row', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRING, len(self.row))
+      for iter419 in self.row:
+        oprot.writeString(iter419)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.row is None:
+      raise TProtocol.TProtocolException(message='Required field row is unset!')
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.row)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class PartitionValuesResponse:
+  """
+  Attributes:
+   - partitionValues
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.LIST, 'partitionValues', (TType.STRUCT,(PartitionValuesRow, PartitionValuesRow.thrift_spec)), None, ), # 1
+  )
+
+  def __init__(self, partitionValues=None,):
+    self.partitionValues = partitionValues
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.LIST:
+          self.partitionValues = []
+          (_etype423, _size420) = iprot.readListBegin()
+          for _i424 in xrange(_size420):
+            _elem425 = PartitionValuesRow()
+            _elem425.read(iprot)
+            self.partitionValues.append(_elem425)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('PartitionValuesResponse')
+    if self.partitionValues is not None:
+      oprot.writeFieldBegin('partitionValues', TType.LIST, 1)
+      oprot.writeListBegin(TType.STRUCT, len(self.partitionValues))
+      for iter426 in self.partitionValues:
+        iter426.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def validate(self):
+    if self.partitionValues is None:
+      raise TProtocol.TProtocolException(message='Required field partitionValues is unset!')
+    return
+
+
+  def __hash__(self):
+    value = 17
+    value = (value * 31) ^ hash(self.partitionValues)
+    return value
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class ResourceUri:
   """
   Attributes:
@@ -7958,11 +8289,11 @@ class Function:
       elif fid == 8:
         if ftype == TType.LIST:
           self.resourceUris = []
-          (_etype402, _size399) = iprot.readListBegin()
-          for _i403 in xrange(_size399):
-            _elem404 = ResourceUri()
-            _elem404.read(iprot)
-            self.resourceUris.append(_elem404)
+          (_etype430, _size427) = iprot.readListBegin()
+          for _i431 in xrange(_size427):
+            _elem432 = ResourceUri()
+            _elem432.read(iprot)
+            self.resourceUris.append(_elem432)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -8007,8 +8338,8 @@ class Function:
     if self.resourceUris is not None:
       oprot.writeFieldBegin('resourceUris', TType.LIST, 8)
       oprot.writeListBegin(TType.STRUCT, len(self.resourceUris))
-      for iter405 in self.resourceUris:
-        iter405.write(oprot)
+      for iter433 in self.resourceUris:
+        iter433.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8252,11 +8583,11 @@ class GetOpenTxnsInfoResponse:
       elif fid == 2:
         if ftype == TType.LIST:
           self.open_txns = []
-          (_etype409, _size406) = iprot.readListBegin()
-          for _i410 in xrange(_size406):
-            _elem411 = TxnInfo()
-            _elem411.read(iprot)
-            self.open_txns.append(_elem411)
+          (_etype437, _size434) = iprot.readListBegin()
+          for _i438 in xrange(_size434):
+            _elem439 = TxnInfo()
+            _elem439.read(iprot)
+            self.open_txns.append(_elem439)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -8277,8 +8608,8 @@ class GetOpenTxnsInfoResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.LIST, 2)
       oprot.writeListBegin(TType.STRUCT, len(self.open_txns))
-      for iter412 in self.open_txns:
-        iter412.write(oprot)
+      for iter440 in self.open_txns:
+        iter440.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8346,10 +8677,10 @@ class GetOpenTxnsResponse:
       elif fid == 2:
         if ftype == TType.SET:
           self.open_txns = set()
-          (_etype416, _size413) = iprot.readSetBegin()
-          for _i417 in xrange(_size413):
-            _elem418 = iprot.readI64()
-            self.open_txns.add(_elem418)
+          (_etype444, _size441) = iprot.readSetBegin()
+          for _i445 in xrange(_size441):
+            _elem446 = iprot.readI64()
+            self.open_txns.add(_elem446)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -8375,8 +8706,8 @@ class GetOpenTxnsResponse:
     if self.open_txns is not None:
       oprot.writeFieldBegin('open_txns', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.open_txns))
-      for iter419 in self.open_txns:
-        oprot.writeI64(iter419)
+      for iter447 in self.open_txns:
+        oprot.writeI64(iter447)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     if self.min_open_txn is not None:
@@ -8548,10 +8879,10 @@ class OpenTxnsResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.txn_ids = []
-          (_etype423, _size420) = iprot.readListBegin()
-          for _i424 in xrange(_size420):
-            _elem425 = iprot.readI64()
-            self.txn_ids.append(_elem425)
+          (_etype451, _size448) = iprot.readListBegin()
+          for _i452 in xrange(_size448):
+            _elem453 = iprot.readI64()
+            self.txn_ids.append(_elem453)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -8568,8 +8899,8 @@ class OpenTxnsResponse:
     if self.txn_ids is not None:
       oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.txn_ids))
-      for iter426 in self.txn_ids:
-        oprot.writeI64(iter426)
+      for iter454 in self.txn_ids:
+        oprot.writeI64(iter454)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -8690,10 +9021,10 @@ class AbortTxnsRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.txn_ids = []
-          (_etype430, _size427) = iprot.readListBegin()
-          for _i431 in xrange(_size427):
-            _elem432 = iprot.readI64()
-            self.txn_ids.append(_elem432)
+          (_etype458, _size455) = iprot.readListBegin()
+          for _i459 in xrange(_size455):
+            _elem460 = iprot.readI64()
+            self.txn_ids.append(_elem460)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -8710,8 +9041,8 @@ class AbortTxnsRequest:
     if self.txn_ids is not None:
       oprot.writeFieldBegin('txn_ids', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.txn_ids))
-      for iter433 in self.txn_ids:
-        oprot.writeI64(iter433)
+      for iter461 in self.txn_ids:
+        oprot.writeI64(iter461)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9006,11 +9337,11 @@ class LockRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.component = []
-          (_etype437, _size434) = iprot.readListBegin()
-          for _i438 in xrange(_size434):
-            _elem439 = LockComponent()
-            _elem439.read(iprot)
-            self.component.append(_elem439)
+          (_etype465, _size462) = iprot.readListBegin()
+          for _i466 in xrange(_size462):
+            _elem467 = LockComponent()
+            _elem467.read(iprot)
+            self.component.append(_elem467)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -9047,8 +9378,8 @@ class LockRequest:
     if self.component is not None:
       oprot.writeFieldBegin('component', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.component))
-      for iter440 in self.component:
-        iter440.write(oprot)
+      for iter468 in self.component:
+        iter468.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.txnid is not None:
@@ -9746,11 +10077,11 @@ class ShowLocksResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.locks = []
-          (_etype444, _size441) = iprot.readListBegin()
-          for _i445 in xrange(_size441):
-            _elem446 = ShowLocksResponseElement()
-            _elem446.read(iprot)
-            self.locks.append(_elem446)
+          (_etype472, _size469) = iprot.readListBegin()
+          for _i473 in xrange(_size469):
+            _elem474 = ShowLocksResponseElement()
+            _elem474.read(iprot)
+            self.locks.append(_elem474)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -9767,8 +10098,8 @@ class ShowLocksResponse:
     if self.locks is not None:
       oprot.writeFieldBegin('locks', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.locks))
-      for iter447 in self.locks:
-        iter447.write(oprot)
+      for iter475 in self.locks:
+        iter475.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -9983,20 +10314,20 @@ class HeartbeatTxnRangeResponse:
       if fid == 1:
         if ftype == TType.SET:
           self.aborted = set()
-          (_etype451, _size448) = iprot.readSetBegin()
-          for _i452 in xrange(_size448):
-            _elem453 = iprot.readI64()
-            self.aborted.add(_elem453)
+          (_etype479, _size476) = iprot.readSetBegin()
+          for _i480 in xrange(_size476):
+            _elem481 = iprot.readI64()
+            self.aborted.add(_elem481)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.SET:
           self.nosuch = set()
-          (_etype457, _size454) = iprot.readSetBegin()
-          for _i458 in xrange(_size454):
-            _elem459 = iprot.readI64()
-            self.nosuch.add(_elem459)
+          (_etype485, _size482) = iprot.readSetBegin()
+          for _i486 in xrange(_size482):
+            _elem487 = iprot.readI64()
+            self.nosuch.add(_elem487)
           iprot.readSetEnd()
         else:
           iprot.skip(ftype)
@@ -10013,15 +10344,15 @@ class HeartbeatTxnRangeResponse:
     if self.aborted is not None:
       oprot.writeFieldBegin('aborted', TType.SET, 1)
       oprot.writeSetBegin(TType.I64, len(self.aborted))
-      for iter460 in self.aborted:
-        oprot.writeI64(iter460)
+      for iter488 in self.aborted:
+        oprot.writeI64(iter488)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     if self.nosuch is not None:
       oprot.writeFieldBegin('nosuch', TType.SET, 2)
       oprot.writeSetBegin(TType.I64, len(self.nosuch))
-      for iter461 in self.nosuch:
-        oprot.writeI64(iter461)
+      for iter489 in self.nosuch:
+        oprot.writeI64(iter489)
       oprot.writeSetEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10118,11 +10449,11 @@ class CompactionRequest:
       elif fid == 6:
         if ftype == TType.MAP:
           self.properties = {}
-          (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin()
-          for _i466 in xrange(_size462):
-            _key467 = iprot.readString()
-            _val468 = iprot.readString()
-            self.properties[_key467] = _val468
+          (_ktype491, _vtype492, _size490 ) = iprot.readMapBegin()
+          for _i494 in xrange(_size490):
+            _key495 = iprot.readString()
+            _val496 = iprot.readString()
+            self.properties[_key495] = _val496
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -10159,9 +10490,9 @@ class CompactionRequest:
     if self.properties is not None:
       oprot.writeFieldBegin('properties', TType.MAP, 6)
       oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties))
-      for kiter469,viter470 in self.properties.items():
-        oprot.writeString(kiter469)
-        oprot.writeString(viter470)
+      for kiter497,viter498 in self.properties.items():
+        oprot.writeString(kiter497)
+        oprot.writeString(viter498)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10596,11 +10927,11 @@ class ShowCompactResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.compacts = []
-          (_etype474, _size471) = iprot.readListBegin()
-          for _i475 in xrange(_size471):
-            _elem476 = ShowCompactResponseElement()
-            _elem476.read(iprot)
-            self.compacts.append(_elem476)
+          (_etype502, _size499) = iprot.readListBegin()
+          for _i503 in xrange(_size499):
+            _elem504 = ShowCompactResponseElement()
+            _elem504.read(iprot)
+            self.compacts.append(_elem504)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -10617,8 +10948,8 @@ class ShowCompactResponse:
     if self.compacts is not None:
       oprot.writeFieldBegin('compacts', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.compacts))
-      for iter477 in self.compacts:
-        iter477.write(oprot)
+      for iter505 in self.compacts:
+        iter505.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -10699,10 +11030,10 @@ class AddDynamicPartitions:
       elif fid == 4:
         if ftype == TType.LIST:
           self.partitionnames = []
-          (_etype481, _size478) = iprot.readListBegin()
-          for _i482 in xrange(_size478):
-            _elem483 = iprot.readString()
-            self.partitionnames.append(_elem483)
+          (_etype509, _size506) = iprot.readListBegin()
+          for _i510 in xrange(_size506):
+            _elem511 = iprot.readString()
+            self.partitionnames.append(_elem511)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -10736,8 +11067,8 @@ class AddDynamicPartitions:
     if self.partitionnames is not None:
       oprot.writeFieldBegin('partitionnames', TType.LIST, 4)
       oprot.writeListBegin(TType.STRING, len(self.partitionnames))
-      for iter484 in self.partitionnames:
-        oprot.writeString(iter484)
+      for iter512 in self.partitionnames:
+        oprot.writeString(iter512)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.operationType is not None:
@@ -11036,11 +11367,11 @@ class NotificationEventResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.events = []
-          (_etype488, _size485) = iprot.readListBegin()
-          for _i489 in xrange(_size485):
-            _elem490 = NotificationEvent()
-            _elem490.read(iprot)
-            self.events.append(_elem490)
+          (_etype516, _size513) = iprot.readListBegin()
+          for _i517 in xrange(_size513):
+            _elem518 = NotificationEvent()
+            _elem518.read(iprot)
+            self.events.append(_elem518)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -11057,8 +11388,8 @@ class NotificationEventResponse:
     if self.events is not None:
       oprot.writeFieldBegin('events', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.events))
-      for iter491 in self.events:
-        iter491.write(oprot)
+      for iter519 in self.events:
+        iter519.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -11182,20 +11513,20 @@ class InsertEventRequestData:
       if fid == 1:
         if ftype == TType.LIST:
           self.filesAdded = []
-          (_etype495, _size492) = iprot.readListBegin()
-          for _i496 in xrange(_size492):
-            _elem497 = iprot.readString()
-            self.filesAdded.append(_elem497)
+          (_etype523, _size520) = iprot.readListBegin()
+          for _i524 in xrange(_size520):
+            _elem525 = iprot.readString()
+            self.filesAdded.append(_elem525)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.LIST:
           self.filesAddedChecksum = []
-          (_etype501, _size498) = iprot.readListBegin()
-          for _i502 in xrange(_size498):
-            _elem503 = iprot.readString()
-            self.filesAddedChecksum.append(_elem503)
+          (_etype529, _size526) = iprot.readListBegin()
+          for _i530 in xrange(_size526):
+            _elem531 = iprot.readString()
+            self.filesAddedChecksum.append(_elem531)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -11212,15 +11543,15 @@ class InsertEventRequestData:
     if self.filesAdded is not None:
       oprot.writeFieldBegin('filesAdded', TType.LIST, 1)
       oprot.writeListBegin(TType.STRING, len(self.filesAdded))
-      for iter504 in self.filesAdded:
-        oprot.writeString(iter504)
+      for iter532 in self.filesAdded:
+        oprot.writeString(iter532)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.filesAddedChecksum is not None:
       oprot.writeFieldBegin('filesAddedChecksum', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.filesAddedChecksum))
-      for iter505 in self.filesAddedChecksum:
-        oprot.writeString(iter505)
+      for iter533 in self.filesAddedChecksum:
+        oprot.writeString(iter533)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -11374,10 +11705,10 @@ class FireEventRequest:
       elif fid == 5:
         if ftype == TType.LIST:
           self.partitionVals = []
-          (_etype509, _size506) = iprot.readListBegin()
-          for _i510 in xrange(_size506):
-            _elem511 = iprot.readString()
-            self.partitionVals.append(_elem511)
+          (_etype537, _size534) = iprot.readListBegin()
+          for _i538 in xrange(_size534):
+            _elem539 = iprot.readString()
+            self.partitionVals.append(_elem539)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -11410,8 +11741,8 @@ class FireEventRequest:
     if self.partitionVals is not None:
       oprot.writeFieldBegin('partitionVals', TType.LIST, 5)
       oprot.writeListBegin(TType.STRING, len(self.partitionVals))
-      for iter512 in self.partitionVals:
-        oprot.writeString(iter512)
+      for iter540 in self.partitionVals:
+        oprot.writeString(iter540)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -11598,12 +11929,12 @@ class GetFileMetadataByExprResult:
       if fid == 1:
         if ftype == TType.MAP:
           self.metadata = {}
-          (_ktype514, _vtype515, _size513 ) = iprot.readMapBegin()
-          for _i517 in xrange(_size513):
-            _key518 = iprot.readI64()
-            _val519 = MetadataPpdResult()
-            _val519.read(iprot)
-            self.metadata[_key518] = _val519
+          (_ktype542, _vtype543, _size541 ) = iprot.readMapBegin()
+          for _i545 in xrange(_size541):
+            _key546 = iprot.readI64()
+            _val547 = MetadataPpdResult()
+            _val547.read(iprot)
+            self.metadata[_key546] = _val547
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -11625,9 +11956,9 @@ class GetFileMetadataByExprResult:
     if self.metadata is not None:
       oprot.writeFieldBegin('metadata', TType.MAP, 1)
       oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata))
-      for kiter520,viter521 in self.metadata.items():
-        oprot.writeI64(kiter520)
-        viter521.write(oprot)
+      for kiter548,viter549 in self.metadata.items():
+        oprot.writeI64(kiter548)
+        viter549.write(oprot)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.isSupported is not None:
@@ -11697,10 +12028,10 @@ class GetFileMetadataByExprRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.fileIds = []
-          (_etype525, _size522) = iprot.readListBegin()
-          for _i526 in xrange(_size522):
-            _elem527 = iprot.readI64()
-            self.fileIds.append(_elem527)
+          (_etype553, _size550) = iprot.readListBegin()
+          for _i554 in xrange(_size550):
+            _elem555 = iprot.readI64()
+            self.fileIds.append(_elem555)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -11732,8 +12063,8 @@ class GetFileMetadataByExprRequest:
     if self.fileIds is not None:
       oprot.writeFieldBegin('fileIds', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.fileIds))
-      for iter528 in self.fileIds:
-        oprot.writeI64(iter528)
+      for iter556 in self.fileIds:
+        oprot.writeI64(iter556)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.expr is not None:
@@ -11807,11 +12138,11 @@ class GetFileMetadataResult:
       if fid == 1:
         if ftype == TType.MAP:
           self.metadata = {}
-          (_ktype530, _vtype531, _size529 ) = iprot.readMapBegin()
-          for _i533 in xrange(_size529):
-            _key534 = iprot.readI64()
-            _val535 = iprot.readString()
-            self.metadata[_key534] = _val535
+          (_ktype558, _vtype559, _size557 ) = iprot.readMapBegin()
+          for _i561 in xrange(_size557):
+            _key562 = iprot.readI64()
+            _val563 = iprot.readString()
+            self.metadata[_key562] = _val563
           iprot.readMapEnd()
         else:
           iprot.skip(ftype)
@@ -11833,9 +12164,9 @@ class GetFileMetadataResult:
     if self.metadata is not None:
       oprot.writeFieldBegin('metadata', TType.MAP, 1)
       oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata))
-      for kiter536,viter537 in self.metadata.items():
-        oprot.writeI64(kiter536)
-        oprot.writeString(viter537)
+      for kiter564,viter565 in self.metadata.items():
+        oprot.writeI64(kiter564)
+        oprot.writeString(viter565)
       oprot.writeMapEnd()
       oprot.writeFieldEnd()
     if self.isSupported is not None:
@@ -11896,10 +12227,10 @@ class GetFileMetadataRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.fileIds = []
-          (_etype541, _size538) = iprot.readListBegin()
-          for _i542 in xrange(_size538):
-            _elem543 = iprot.readI64()
-            self.fileIds.append(_elem543)
+          (_etype569, _size566) = iprot.readListBegin()
+          for _i570 in xrange(_size566):
+            _elem571 = iprot.readI64()
+            self.fileIds.append(_elem571)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -11916,8 +12247,8 @@ class GetFileMetadataRequest:
     if self.fileIds is not None:
       oprot.writeFieldBegin('fileIds', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.fileIds))
-      for iter544 in self.fileIds:
-        oprot.writeI64(iter544)
+      for iter572 in self.fileIds:
+        oprot.writeI64(iter572)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -12023,20 +12354,20 @@ class PutFileMetadataRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.fileIds = []
-          (_etype548, _size545) = iprot.readListBegin()
-          for _i549 in xrange(_size545):
-            _elem550 = iprot.readI64()
-            self.fileIds.append(_elem550)
+          (_etype576, _size573) = iprot.readListBegin()
+          for _i577 in xrange(_size573):
+            _elem578 = iprot.readI64()
+            self.fileIds.append(_elem578)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
       elif fid == 2:
         if ftype == TType.LIST:
           self.metadata = []
-          (_etype554, _size551) = iprot.readListBegin()
-          for _i555 in xrange(_size551):
-            _elem556 = iprot.readString()
-            self.metadata.append(_elem556)
+          (_etype582, _size579) = iprot.readListBegin()
+          for _i583 in xrange(_size579):
+            _elem584 = iprot.readString()
+            self.metadata.append(_elem584)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12058,15 +12389,15 @@ class PutFileMetadataRequest:
     if self.fileIds is not None:
       oprot.writeFieldBegin('fileIds', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.fileIds))
-      for iter557 in self.fileIds:
-        oprot.writeI64(iter557)
+      for iter585 in self.fileIds:
+        oprot.writeI64(iter585)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.metadata is not None:
       oprot.writeFieldBegin('metadata', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.metadata))
-      for iter558 in self.metadata:
-        oprot.writeString(iter558)
+      for iter586 in self.metadata:
+        oprot.writeString(iter586)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.type is not None:
@@ -12174,10 +12505,10 @@ class ClearFileMetadataRequest:
       if fid == 1:
         if ftype == TType.LIST:
           self.fileIds = []
-          (_etype562, _size559) = iprot.readListBegin()
-          for _i563 in xrange(_size559):
-            _elem564 = iprot.readI64()
-            self.fileIds.append(_elem564)
+          (_etype590, _size587) = iprot.readListBegin()
+          for _i591 in xrange(_size587):
+            _elem592 = iprot.readI64()
+            self.fileIds.append(_elem592)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12194,8 +12525,8 @@ class ClearFileMetadataRequest:
     if self.fileIds is not None:
       oprot.writeFieldBegin('fileIds', TType.LIST, 1)
       oprot.writeListBegin(TType.I64, len(self.fileIds))
-      for iter565 in self.fileIds:
-        oprot.writeI64(iter565)
+      for iter593 in self.fileIds:
+        oprot.writeI64(iter593)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -12424,11 +12755,11 @@ class GetAllFunctionsResponse:
       if fid == 1:
         if ftype == TType.LIST:
           self.functions = []
-          (_etype569, _size566) = iprot.readListBegin()
-          for _i570 in xrange(_size566):
-            _elem571 = Function()
-            _elem571.read(iprot)
-            self.functions.append(_elem571)
+          (_etype597, _size594) = iprot.readListBegin()
+          for _i598 in xrange(_size594):
+            _elem599 = Function()
+            _elem599.read(iprot)
+            self.functions.append(_elem599)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12445,8 +12776,8 @@ class GetAllFunctionsResponse:
     if self.functions is not None:
       oprot.writeFieldBegin('functions', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.functions))
-      for iter572 in self.functions:
-        iter572.write(oprot)
+      for iter600 in self.functions:
+        iter600.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -12498,10 +12829,10 @@ class ClientCapabilities:
       if fid == 1:
         if ftype == TType.LIST:
           self.values = []
-          (_etype576, _size573) = iprot.readListBegin()
-          for _i577 in xrange(_size573):
-            _elem578 = iprot.readI32()
-            self.values.append(_elem578)
+          (_etype604, _size601) = iprot.readListBegin()
+          for _i605 in xrange(_size601):
+            _elem606 = iprot.readI32()
+            self.values.append(_elem606)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12518,8 +12849,8 @@ class ClientCapabilities:
     if self.values is not None:
       oprot.writeFieldBegin('values', TType.LIST, 1)
       oprot.writeListBegin(TType.I32, len(self.values))
-      for iter579 in self.values:
-        oprot.writeI32(iter579)
+      for iter607 in self.values:
+        oprot.writeI32(iter607)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()
@@ -12748,10 +13079,10 @@ class GetTablesRequest:
       elif fid == 2:
         if ftype == TType.LIST:
           self.tblNames = []
-          (_etype583, _size580) = iprot.readListBegin()
-          for _i584 in xrange(_size580):
-            _elem585 = iprot.readString()
-            self.tblNames.append(_elem585)
+          (_etype611, _size608) = iprot.readListBegin()
+          for _i612 in xrange(_size608):
+            _elem613 = iprot.readString()
+            self.tblNames.append(_elem613)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12778,8 +13109,8 @@ class GetTablesRequest:
     if self.tblNames is not None:
       oprot.writeFieldBegin('tblNames', TType.LIST, 2)
       oprot.writeListBegin(TType.STRING, len(self.tblNames))
-      for iter586 in self.tblNames:
-        oprot.writeString(iter586)
+      for iter614 in self.tblNames:
+        oprot.writeString(iter614)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     if self.capabilities is not None:
@@ -12839,11 +13170,11 @@ class GetTablesResult:
       if fid == 1:
         if ftype == TType.LIST:
           self.tables = []
-          (_etype590, _size587) = iprot.readListBegin()
-          for _i591 in xrange(_size587):
-            _elem592 = Table()
-            _elem592.read(iprot)
-            self.tables.append(_elem592)
+          (_etype618, _size615) = iprot.readListBegin()
+          for _i619 in xrange(_size615):
+            _elem620 = Table()
+            _elem620.read(iprot)
+            self.tables.append(_elem620)
           iprot.readListEnd()
         else:
           iprot.skip(ftype)
@@ -12860,8 +13191,8 @@ class GetTablesResult:
     if self.tables is not None:
       oprot.writeFieldBegin('tables', TType.LIST, 1)
       oprot.writeListBegin(TType.STRUCT, len(self.tables))
-      for iter593 in self.tables:
-        iter593.write(oprot)
+      for iter621 in self.tables:
+        iter621.write(oprot)
       oprot.writeListEnd()
       oprot.writeFieldEnd()
     oprot.writeFieldStop()

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb 
b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 5342451..8f550e8 100644
--- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@ -1735,6 +1735,73 @@ class DropPartitionsRequest
   ::Thrift::Struct.generate_accessors self
 end
 
+class PartitionValuesRequest
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  DBNAME = 1
+  TBLNAME = 2
+  PARTITIONKEYS = 3
+  APPLYDISTINCT = 4
+  FILTER = 5
+  PARTITIONORDER = 6
+  ASCENDING = 7
+  MAXPARTS = 8
+
+  FIELDS = {
+    DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+    TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+    PARTITIONKEYS => {:type => ::Thrift::Types::LIST, :name => 
'partitionKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => 
::FieldSchema}},
+    APPLYDISTINCT => {:type => ::Thrift::Types::BOOL, :name => 
'applyDistinct', :default => true, :optional => true},
+    FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter', :optional 
=> true},
+    PARTITIONORDER => {:type => ::Thrift::Types::LIST, :name => 
'partitionOrder', :element => {:type => ::Thrift::Types::STRUCT, :class => 
::FieldSchema}, :optional => true},
+    ASCENDING => {:type => ::Thrift::Types::BOOL, :name => 'ascending', 
:default => true, :optional => true},
+    MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default 
=> -1, :optional => true}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required 
field dbName is unset!') unless @dbName
+    raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required 
field tblName is unset!') unless @tblName
+    raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required 
field partitionKeys is unset!') unless @partitionKeys
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class PartitionValuesRow
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  ROW = 1
+
+  FIELDS = {
+    ROW => {:type => ::Thrift::Types::LIST, :name => 'row', :element => {:type 
=> ::Thrift::Types::STRING}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required 
field row is unset!') unless @row
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
+class PartitionValuesResponse
+  include ::Thrift::Struct, ::Thrift::Struct_Union
+  PARTITIONVALUES = 1
+
+  FIELDS = {
+    PARTITIONVALUES => {:type => ::Thrift::Types::LIST, :name => 
'partitionValues', :element => {:type => ::Thrift::Types::STRUCT, :class => 
::PartitionValuesRow}}
+  }
+
+  def struct_fields; FIELDS; end
+
+  def validate
+    raise 
::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required 
field partitionValues is unset!') unless @partitionValues
+  end
+
+  ::Thrift::Struct.generate_accessors self
+end
+
 class ResourceUri
   include ::Thrift::Struct, ::Thrift::Struct_Union
   RESOURCETYPE = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb 
b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 7cdfc86..2e22eaf 100644
--- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@ -1016,6 +1016,23 @@ module ThriftHiveMetastore
       raise 
::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT,
 'get_partition_names failed: unknown result')
     end
 
+    def get_partition_values(request)
+      send_get_partition_values(request)
+      return recv_get_partition_values()
+    end
+
+    def send_get_partition_values(request)
+      send_message('get_partition_values', Get_partition_values_args, :request 
=> request)
+    end
+
+    def recv_get_partition_values()
+      result = receive_message(Get_partition_values_result)
+      return result.success unless result.success.nil?
+      raise result.o1 unless result.o1.nil?
+      raise result.o2 unless result.o2.nil?
+      raise 
::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT,
 'get_partition_values failed: unknown result')
+    end
+
     def get_partitions_ps(db_name, tbl_name, part_vals, max_parts)
       send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts)
       return recv_get_partitions_ps()
@@ -3349,6 +3366,19 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_partition_names', seqid)
     end
 
+    def process_get_partition_values(seqid, iprot, oprot)
+      args = read_args(iprot, Get_partition_values_args)
+      result = Get_partition_values_result.new()
+      begin
+        result.success = @handler.get_partition_values(args.request)
+      rescue ::MetaException => o1
+        result.o1 = o1
+      rescue ::NoSuchObjectException => o2
+        result.o2 = o2
+      end
+      write_result(result, oprot, 'get_partition_values', seqid)
+    end
+
     def process_get_partitions_ps(seqid, iprot, oprot)
       args = read_args(iprot, Get_partitions_ps_args)
       result = Get_partitions_ps_result.new()
@@ -6732,6 +6762,42 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Get_partition_values_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    REQUEST = 1
+
+    FIELDS = {
+      REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class 
=> ::PartitionValuesRequest}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Get_partition_values_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    SUCCESS = 0
+    O1 = 1
+    O2 = 2
+
+    FIELDS = {
+      SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class 
=> ::PartitionValuesResponse},
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => 
::MetaException},
+      O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => 
::NoSuchObjectException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_partitions_ps_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     DB_NAME = 1

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 4d68948..4ac9269 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -3726,6 +3726,17 @@ public class HiveMetaStore extends ThriftHiveMetastore {
     }
 
     @Override
+    public PartitionValuesResponse get_partition_values(PartitionValuesRequest 
request) throws MetaException {
+      String dbName = request.getDbName();
+      String tblName = request.getTblName();
+      List<FieldSchema> partCols = new ArrayList<FieldSchema>();
+      partCols.add(request.getPartitionKeys().get(0));
+      return getMS().listPartitionValues(dbName, tblName, 
request.getPartitionKeys(),
+          request.isApplyDistinct(), request.getFilter(), 
request.isAscending(),
+          request.getPartitionOrder(), request.getMaxParts());
+    }
+
+    @Override
     public void alter_partition(final String db_name, final String tbl_name,
         final Partition new_part)
         throws InvalidOperationException, MetaException, TException {

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 76b61cc..d0fd319 100644
--- 
a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ 
b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -1324,6 +1324,12 @@ public class HiveMetaStoreClient implements 
IMetaStoreClient {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest 
request)
+      throws MetaException, TException, NoSuchObjectException {
+    return client.get_partition_values(request);
+  }
+
+  @Override
   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
       List<String> part_vals, String user_name, List<String> group_names)
       throws MetaException, UnknownTableException, NoSuchObjectException,

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index e7ead6b..a28c510 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -26,7 +26,6 @@ import java.util.Map;
 import java.util.Map.Entry;
 
 import org.apache.hadoop.hive.common.ObjectPair;
-import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.ValidTxnList;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience;
 import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public;
@@ -77,6 +76,8 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
@@ -557,6 +558,9 @@ public interface IMetaStoreClient {
       List<String> part_vals, short max_parts)
       throws MetaException, TException, NoSuchObjectException;
 
+  public PartitionValuesResponse listPartitionValues(PartitionValuesRequest 
request)
+      throws MetaException, TException, NoSuchObjectException;
+
   /**
    * Get number of partitions matching specified filter
    * @param dbName the database name

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 5cb9395..d16d2a3 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -96,6 +96,8 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -2243,6 +2245,250 @@ public class ObjectStore implements RawStore, 
Configurable {
     return pns;
   }
 
+  private String extractPartitionKey(FieldSchema key, List<FieldSchema> pkeys) 
{
+    StringBuilder buffer = new StringBuilder(256);
+
+    assert pkeys.size() >= 1;
+
+    String partKey = "/" + key.getName() + "=";
+
+    // Table is partitioned by single key
+    if (pkeys.size() == 1 && (pkeys.get(0).getName().matches(key.getName()))) {
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(key.getName()).append("=\") + 
").append(key.getName().length() + 1)
+          .append(")");
+
+      // First partition key - anything between key= and first /
+    } else if ((pkeys.get(0).getName().matches(key.getName()))) {
+
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(key.getName()).append("=\") + 
").append(key.getName().length() + 1).append(", ")
+          .append("partitionName.indexOf(\"/\")")
+          .append(")");
+
+      // Last partition key - anything between /key= and end
+    } else if ((pkeys.get(pkeys.size() - 1).getName().matches(key.getName()))) 
{
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(partKey).append("\") + ").append(partKey.length())
+          .append(")");
+
+      // Intermediate key - anything between /key= and the following /
+    } else {
+
+      buffer.append("partitionName.substring(partitionName.indexOf(\"")
+          .append(partKey).append("\") + ").append(partKey.length()).append(", 
")
+          .append("partitionName.indexOf(\"/\", 
partitionName.indexOf(\"").append(partKey)
+          .append("\") + 1))");
+    }
+    LOG.info("Query for Key:" + key.getName() + " is :" + buffer);
+    return buffer.toString();
+  }
+
+  @Override
+  public PartitionValuesResponse listPartitionValues(String dbName, String 
tableName, List<FieldSchema> cols,
+                                                     boolean applyDistinct, 
String filter, boolean ascending,
+                                                     List<FieldSchema> order, 
long maxParts) throws MetaException {
+
+    dbName = dbName.toLowerCase().trim();
+    tableName = tableName.toLowerCase().trim();
+    try {
+      if (filter == null || filter.isEmpty()) {
+        PartitionValuesResponse response =
+            getDistinctValuesForPartitionsNoTxn(dbName, tableName, cols, 
applyDistinct, ascending, maxParts);
+        LOG.info("Number of records fetched: " + 
response.getPartitionValues().size());
+        return response;
+      } else {
+        PartitionValuesResponse response =
+            extractPartitionNamesByFilter(dbName, tableName, filter, cols, 
ascending, applyDistinct, maxParts);
+        if (response != null && response.getPartitionValues() != null) {
+          LOG.info("Number of records fetched with filter: " + 
response.getPartitionValues().size());
+        }
+        return response;
+      }
+    } catch (Exception t) {
+      LOG.error("Exception in ORM", t);
+      throw new MetaException("Error retrieving partition values: " + t);
+    } finally {
+    }
+  }
+
+  private PartitionValuesResponse extractPartitionNamesByFilter(String dbName, 
String tableName, String filter,
+                                                                
List<FieldSchema> cols, boolean ascending, boolean applyDistinct, long maxParts)
+      throws MetaException, NoSuchObjectException {
+
+    LOG.info("Database: " + dbName + " Table:" + tableName + " filter\"" + 
filter + "\" cols:" + cols);
+    List<String> partitionResults = new ArrayList<String>();
+    List<String> partitionNames = null;
+    List<Partition> partitions = null;
+    Table tbl = getTable(dbName, tableName);
+    try {
+      // Get partitions by name - ascending or descending
+      partitionNames = getPartitionNamesByFilter(dbName, tableName, filter, 
ascending, maxParts);
+    } catch (MetaException e) {
+      LOG.warn("Querying by partition names failed, trying out with partition 
objects, filter:" + filter);
+    }
+
+    if (partitionNames == null) {
+      partitions = getPartitionsByFilter(dbName, tableName, filter, (short) 
maxParts);
+    }
+
+    if (partitions != null) {
+      partitionNames = new ArrayList<String>(partitions.size());
+      for (Partition partition : partitions) {
+        // Check for NULL's just to be safe
+        if (tbl.getPartitionKeys() != null && partition.getValues() != null) {
+          partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), 
partition.getValues()));
+        }
+      }
+    }
+
+    if (partitionNames == null && partitions == null) {
+      throw new MetaException("Cannot obtain list of partitions by filter:\"" 
+ filter +
+          "\" for " + dbName + ":" + tableName);
+    }
+
+    if (!ascending) {
+      Collections.sort(partitionNames, Collections.reverseOrder());
+    }
+
+    // Return proper response
+    PartitionValuesResponse response = new PartitionValuesResponse();
+    response.setPartitionValues(new 
ArrayList<PartitionValuesRow>(partitionNames.size()));
+    LOG.info("Converting responses to Partition values for items:" + 
partitionNames.size());
+    for (String partName : partitionNames) {
+      ArrayList<String> vals = new 
ArrayList<String>(tbl.getPartitionKeys().size());
+      for (FieldSchema key : tbl.getPartitionKeys()) {
+        vals.add(null);
+      }
+      PartitionValuesRow row = new PartitionValuesRow();
+      Warehouse.makeValsFromName(partName, vals);
+      for (String value : vals) {
+        row.addToRow(value);
+      }
+      response.addToPartitionValues(row);
+    }
+    return response;
+  }
+
+  private List<String> getPartitionNamesByFilter(String dbName, String 
tableName,
+                                                 String filter, boolean 
ascending, long maxParts)
+      throws MetaException {
+
+    boolean success = false;
+    List<String> partNames = new ArrayList<String>();
+    try {
+      openTransaction();
+      LOG.debug("Executing getPartitionNamesByFilter");
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      MTable mtable = getMTable(dbName, tableName);
+      if( mtable == null ) {
+        // To be consistent with the behavior of listPartitionNames, if the
+        // table or db does not exist, we return an empty list
+        return partNames;
+      }
+      Map<String, Object> params = new HashMap<String, Object>();
+      String queryFilterString = makeQueryFilterString(dbName, mtable, filter, 
params);
+      Query query = pm.newQuery(
+          "select partitionName from 
org.apache.hadoop.hive.metastore.model.MPartition "
+              + "where " + queryFilterString);
+
+      if (maxParts >= 0) {
+        //User specified a row limit, set it on the Query
+        query.setRange(0, maxParts);
+      }
+
+      LOG.debug("Filter specified is " + filter + "," +
+          " JDOQL filter is " + queryFilterString);
+      LOG.debug("Parms is " + params);
+
+      String parameterDeclaration = makeParameterDeclarationStringObj(params);
+      query.declareParameters(parameterDeclaration);
+      if (ascending) {
+        query.setOrdering("partitionName ascending");
+      } else {
+        query.setOrdering("partitionName descending");
+      }
+      query.setResult("partitionName");
+
+      Collection names = (Collection) query.executeWithMap(params);
+      partNames = new ArrayList<String>();
+      for (Iterator i = names.iterator(); i.hasNext();) {
+        partNames.add((String) i.next());
+      }
+
+      LOG.debug("Done executing query for getPartitionNamesByFilter");
+      success = commitTransaction();
+      LOG.debug("Done retrieving all objects for getPartitionNamesByFilter, 
size:" + partNames.size());
+      query.closeAll();
+    } finally {
+      if (!success) {
+        rollbackTransaction();
+      }
+    }
+    return partNames;
+  }
+
+  private PartitionValuesResponse getDistinctValuesForPartitionsNoTxn(String 
dbName, String tableName, List<FieldSchema> cols,
+                                                                      boolean 
applyDistinct, boolean ascending, long maxParts)
+      throws MetaException {
+
+    try {
+      openTransaction();
+      Query q = pm.newQuery("select partitionName from 
org.apache.hadoop.hive.metastore.model.MPartition "
+          + "where table.database.name == t1 && table.tableName == t2 ");
+      q.declareParameters("java.lang.String t1, java.lang.String t2");
+
+      // TODO: Ordering seems to affect the distinctness, needs checking, 
disabling.
+/*
+      if (ascending) {
+        q.setOrdering("partitionName ascending");
+      } else {
+        q.setOrdering("partitionName descending");
+      }
+*/
+      if (maxParts > 0) {
+        q.setRange(0, maxParts);
+      }
+      StringBuilder partValuesSelect = new StringBuilder(256);
+      if (applyDistinct) {
+        partValuesSelect.append("DISTINCT ");
+      }
+      List<FieldSchema> partitionKeys = getTable(dbName, 
tableName).getPartitionKeys();
+      for (FieldSchema key : cols) {
+        partValuesSelect.append(extractPartitionKey(key, 
partitionKeys)).append(", ");
+      }
+      partValuesSelect.setLength(partValuesSelect.length() - 2);
+      LOG.info("Columns to be selected from Partitions: " + partValuesSelect);
+      q.setResult(partValuesSelect.toString());
+
+      PartitionValuesResponse response = new PartitionValuesResponse();
+      response.setPartitionValues(new ArrayList<PartitionValuesRow>());
+      if (cols.size() > 1) {
+        List<Object[]> results = (List<Object[]>) q.execute(dbName, tableName);
+        for (Object[] row : results) {
+          PartitionValuesRow rowResponse = new PartitionValuesRow();
+          for (Object columnValue : row) {
+            rowResponse.addToRow((String) columnValue);
+          }
+          response.addToPartitionValues(rowResponse);
+        }
+      } else {
+        List<Object> results = (List<Object>) q.execute(dbName, tableName);
+        for (Object row : results) {
+          PartitionValuesRow rowResponse = new PartitionValuesRow();
+          rowResponse.addToRow((String) row);
+          response.addToPartitionValues(rowResponse);
+        }
+      }
+      q.closeAll();
+      return response;
+    } finally {
+      commitTransaction();
+    }
+  }
+
   private List<String> getPartitionNamesNoTxn(String dbName, String tableName, 
short max) {
     List<String> pns = new ArrayList<String>();
     dbName = HiveStringUtils.normalizeIdentifier(dbName);

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
index 5b40835..639a1d8 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -28,11 +28,11 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.hive.common.classification.InterfaceStability;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -47,6 +47,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -197,6 +198,10 @@ public interface RawStore extends Configurable {
   public abstract List<String> listPartitionNames(String db_name,
       String tbl_name, short max_parts) throws MetaException;
 
+  public abstract PartitionValuesResponse listPartitionValues(String db_name, 
String tbl_name,
+                                                              
List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+                                                              
List<FieldSchema> order, long maxParts) throws MetaException;
+
   public abstract List<String> listPartitionNamesByFilter(String db_name,
       String tbl_name, String filter, short max_parts) throws MetaException;
 

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java 
b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
index ecddb8a..9e30600 100644
--- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
+++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java
@@ -23,6 +23,7 @@ import com.google.common.cache.CacheLoader;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hive.common.ObjectPair;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -647,6 +648,11 @@ public class HBaseStore implements RawStore {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(String db_name, String 
tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean 
ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String db_name, String 
tbl_name, String filter,
                                                  short max_parts) throws 
MetaException {
     // TODO needs to wait until we support pushing filters into HBase.

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 275797e..2babda2 100644
--- 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -26,11 +26,11 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -45,6 +45,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -263,6 +264,11 @@ public class DummyRawStoreControlledCommit implements 
RawStore, Configurable {
   }
 
   @Override
+  public PartitionValuesResponse listPartitionValues(String db_name, String 
tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter, boolean 
ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+    return null;
+  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String dbName, String tblName,
       String filter, short maxParts) throws MetaException {
     return objectStore.listPartitionNamesByFilter(dbName, tblName, filter, 
maxParts);

http://git-wip-us.apache.org/repos/asf/hive/blob/dec4efd3/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --git 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 7f1784e..52391be 100644
--- 
a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -27,11 +27,11 @@ import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
@@ -46,6 +46,7 @@ import 
org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
@@ -271,6 +272,11 @@ public class DummyRawStoreForJdoConnection implements 
RawStore {
   }
 
  @Override
  // Intentional no-op stub: this dummy RawStore exists only to exercise JDO
  // connection handling, so partition-values listing returns null rather than
  // real data. Callers in tests must not rely on the result.
  public PartitionValuesResponse listPartitionValues(String db_name, String tbl_name,
      List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
      List<FieldSchema> order, long maxParts) throws MetaException {
    return null;
  }
+
+  @Override
   public List<String> listPartitionNamesByFilter(String db_name, String 
tbl_name, String filter,
       short max_parts) throws MetaException {
 

Reply via email to