hive-commits mailing list archives

From: nzh...@apache.org
Subject: svn commit: r923454 [3/3] - in /hadoop/hive/trunk: ./ metastore/if/ metastore/src/gen-cpp/ metastore/src/gen-javabean/org/apache/hadoop/hive/metastore/api/ metastore/src/gen-php/ metastore/src/gen-py/hive_metastore/ metastore/src/java/org/apache/hadoop...
Date: Mon, 15 Mar 2010 21:49:45 GMT
Modified: hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py (original)
+++ hadoop/hive/trunk/metastore/src/gen-py/hive_metastore/ThriftHiveMetastore.py Mon Mar 15 21:49:45 2010
@@ -211,6 +211,26 @@ class Iface(fb303.FacebookService.Iface)
     """
     pass
 
+  def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts):
+    """
+    Parameters:
+     - db_name
+     - tbl_name
+     - part_vals
+     - max_parts
+    """
+    pass
+
+  def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts):
+    """
+    Parameters:
+     - db_name
+     - tbl_name
+     - part_vals
+     - max_parts
+    """
+    pass
+
   def alter_partition(self, db_name, tbl_name, new_part):
     """
     Parameters:
@@ -1095,6 +1115,82 @@ class Client(fb303.FacebookService.Clien
       raise result.o2
     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names failed: unknown result");
 
+  def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts):
+    """
+    Parameters:
+     - db_name
+     - tbl_name
+     - part_vals
+     - max_parts
+    """
+    self.send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts)
+    return self.recv_get_partitions_ps()
+
+  def send_get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts):
+    self._oprot.writeMessageBegin('get_partitions_ps', TMessageType.CALL, self._seqid)
+    args = get_partitions_ps_args()
+    args.db_name = db_name
+    args.tbl_name = tbl_name
+    args.part_vals = part_vals
+    args.max_parts = max_parts
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_partitions_ps(self, ):
+    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(self._iprot)
+      self._iprot.readMessageEnd()
+      raise x
+    result = get_partitions_ps_result()
+    result.read(self._iprot)
+    self._iprot.readMessageEnd()
+    if result.success != None:
+      return result.success
+    if result.o1 != None:
+      raise result.o1
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_ps failed: unknown result");
+
+  def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts):
+    """
+    Parameters:
+     - db_name
+     - tbl_name
+     - part_vals
+     - max_parts
+    """
+    self.send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)
+    return self.recv_get_partition_names_ps()
+
+  def send_get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts):
+    self._oprot.writeMessageBegin('get_partition_names_ps', TMessageType.CALL, self._seqid)
+    args = get_partition_names_ps_args()
+    args.db_name = db_name
+    args.tbl_name = tbl_name
+    args.part_vals = part_vals
+    args.max_parts = max_parts
+    args.write(self._oprot)
+    self._oprot.writeMessageEnd()
+    self._oprot.trans.flush()
+
+  def recv_get_partition_names_ps(self, ):
+    (fname, mtype, rseqid) = self._iprot.readMessageBegin()
+    if mtype == TMessageType.EXCEPTION:
+      x = TApplicationException()
+      x.read(self._iprot)
+      self._iprot.readMessageEnd()
+      raise x
+    result = get_partition_names_ps_result()
+    result.read(self._iprot)
+    self._iprot.readMessageEnd()
+    if result.success != None:
+      return result.success
+    if result.o1 != None:
+      raise result.o1
+    raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partition_names_ps failed: unknown result");
+
   def alter_partition(self, db_name, tbl_name, new_part):
     """
     Parameters:
@@ -1193,6 +1289,8 @@ class Processor(fb303.FacebookService.Pr
     self._processMap["get_partition_by_name"] = Processor.process_get_partition_by_name
     self._processMap["get_partitions"] = Processor.process_get_partitions
     self._processMap["get_partition_names"] = Processor.process_get_partition_names
+    self._processMap["get_partitions_ps"] = Processor.process_get_partitions_ps
+    self._processMap["get_partition_names_ps"] = Processor.process_get_partition_names_ps
     self._processMap["alter_partition"] = Processor.process_alter_partition
     self._processMap["get_config_value"] = Processor.process_get_config_value
 
@@ -1595,6 +1693,34 @@ class Processor(fb303.FacebookService.Pr
     oprot.writeMessageEnd()
     oprot.trans.flush()
 
+  def process_get_partitions_ps(self, seqid, iprot, oprot):
+    args = get_partitions_ps_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_partitions_ps_result()
+    try:
+      result.success = self._handler.get_partitions_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts)
+    except MetaException, o1:
+      result.o1 = o1
+    oprot.writeMessageBegin("get_partitions_ps", TMessageType.REPLY, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
+  def process_get_partition_names_ps(self, seqid, iprot, oprot):
+    args = get_partition_names_ps_args()
+    args.read(iprot)
+    iprot.readMessageEnd()
+    result = get_partition_names_ps_result()
+    try:
+      result.success = self._handler.get_partition_names_ps(args.db_name, args.tbl_name, args.part_vals, args.max_parts)
+    except MetaException, o1:
+      result.o1 = o1
+    oprot.writeMessageBegin("get_partition_names_ps", TMessageType.REPLY, seqid)
+    result.write(oprot)
+    oprot.writeMessageEnd()
+    oprot.trans.flush()
+
   def process_alter_partition(self, seqid, iprot, oprot):
     args = alter_partition_args()
     args.read(iprot)
@@ -5288,6 +5414,359 @@ class get_partition_names_result:
   def __ne__(self, other):
     return not (self == other)
 
+class get_partitions_ps_args:
+  """
+  Attributes:
+   - db_name
+   - tbl_name
+   - part_vals
+   - max_parts
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_name', None, None, ), # 1
+    (2, TType.STRING, 'tbl_name', None, None, ), # 2
+    (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3
+    (4, TType.I16, 'max_parts', None, -1, ), # 4
+  )
+
+  def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],):
+    self.db_name = db_name
+    self.tbl_name = tbl_name
+    self.part_vals = part_vals
+    self.max_parts = max_parts
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tbl_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.part_vals = []
+          (_etype176, _size173) = iprot.readListBegin()
+          for _i177 in xrange(_size173):
+            _elem178 = iprot.readString();
+            self.part_vals.append(_elem178)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I16:
+          self.max_parts = iprot.readI16();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partitions_ps_args')
+    if self.db_name != None:
+      oprot.writeFieldBegin('db_name', TType.STRING, 1)
+      oprot.writeString(self.db_name)
+      oprot.writeFieldEnd()
+    if self.tbl_name != None:
+      oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+      oprot.writeString(self.tbl_name)
+      oprot.writeFieldEnd()
+    if self.part_vals != None:
+      oprot.writeFieldBegin('part_vals', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.part_vals))
+      for iter179 in self.part_vals:
+        oprot.writeString(iter179)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.max_parts != None:
+      oprot.writeFieldBegin('max_parts', TType.I16, 4)
+      oprot.writeI16(self.max_parts)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_partitions_ps_result:
+  """
+  Attributes:
+   - success
+   - o1
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRUCT,(Partition, Partition.thrift_spec)), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, success=None, o1=None,):
+    self.success = success
+    self.o1 = o1
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype183, _size180) = iprot.readListBegin()
+          for _i184 in xrange(_size180):
+            _elem185 = Partition()
+            _elem185.read(iprot)
+            self.success.append(_elem185)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partitions_ps_result')
+    if self.success != None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRUCT, len(self.success))
+      for iter186 in self.success:
+        iter186.write(oprot)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 != None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_partition_names_ps_args:
+  """
+  Attributes:
+   - db_name
+   - tbl_name
+   - part_vals
+   - max_parts
+  """
+
+  thrift_spec = (
+    None, # 0
+    (1, TType.STRING, 'db_name', None, None, ), # 1
+    (2, TType.STRING, 'tbl_name', None, None, ), # 2
+    (3, TType.LIST, 'part_vals', (TType.STRING,None), None, ), # 3
+    (4, TType.I16, 'max_parts', None, -1, ), # 4
+  )
+
+  def __init__(self, db_name=None, tbl_name=None, part_vals=None, max_parts=thrift_spec[4][4],):
+    self.db_name = db_name
+    self.tbl_name = tbl_name
+    self.part_vals = part_vals
+    self.max_parts = max_parts
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 1:
+        if ftype == TType.STRING:
+          self.db_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 2:
+        if ftype == TType.STRING:
+          self.tbl_name = iprot.readString();
+        else:
+          iprot.skip(ftype)
+      elif fid == 3:
+        if ftype == TType.LIST:
+          self.part_vals = []
+          (_etype190, _size187) = iprot.readListBegin()
+          for _i191 in xrange(_size187):
+            _elem192 = iprot.readString();
+            self.part_vals.append(_elem192)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 4:
+        if ftype == TType.I16:
+          self.max_parts = iprot.readI16();
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partition_names_ps_args')
+    if self.db_name != None:
+      oprot.writeFieldBegin('db_name', TType.STRING, 1)
+      oprot.writeString(self.db_name)
+      oprot.writeFieldEnd()
+    if self.tbl_name != None:
+      oprot.writeFieldBegin('tbl_name', TType.STRING, 2)
+      oprot.writeString(self.tbl_name)
+      oprot.writeFieldEnd()
+    if self.part_vals != None:
+      oprot.writeFieldBegin('part_vals', TType.LIST, 3)
+      oprot.writeListBegin(TType.STRING, len(self.part_vals))
+      for iter193 in self.part_vals:
+        oprot.writeString(iter193)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.max_parts != None:
+      oprot.writeFieldBegin('max_parts', TType.I16, 4)
+      oprot.writeI16(self.max_parts)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
+class get_partition_names_ps_result:
+  """
+  Attributes:
+   - success
+   - o1
+  """
+
+  thrift_spec = (
+    (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0
+    (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1
+  )
+
+  def __init__(self, success=None, o1=None,):
+    self.success = success
+    self.o1 = o1
+
+  def read(self, iprot):
+    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+      return
+    iprot.readStructBegin()
+    while True:
+      (fname, ftype, fid) = iprot.readFieldBegin()
+      if ftype == TType.STOP:
+        break
+      if fid == 0:
+        if ftype == TType.LIST:
+          self.success = []
+          (_etype197, _size194) = iprot.readListBegin()
+          for _i198 in xrange(_size194):
+            _elem199 = iprot.readString();
+            self.success.append(_elem199)
+          iprot.readListEnd()
+        else:
+          iprot.skip(ftype)
+      elif fid == 1:
+        if ftype == TType.STRUCT:
+          self.o1 = MetaException()
+          self.o1.read(iprot)
+        else:
+          iprot.skip(ftype)
+      else:
+        iprot.skip(ftype)
+      iprot.readFieldEnd()
+    iprot.readStructEnd()
+
+  def write(self, oprot):
+    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+      return
+    oprot.writeStructBegin('get_partition_names_ps_result')
+    if self.success != None:
+      oprot.writeFieldBegin('success', TType.LIST, 0)
+      oprot.writeListBegin(TType.STRING, len(self.success))
+      for iter200 in self.success:
+        oprot.writeString(iter200)
+      oprot.writeListEnd()
+      oprot.writeFieldEnd()
+    if self.o1 != None:
+      oprot.writeFieldBegin('o1', TType.STRUCT, 1)
+      self.o1.write(oprot)
+      oprot.writeFieldEnd()
+    oprot.writeFieldStop()
+    oprot.writeStructEnd()
+
+  def __repr__(self):
+    L = ['%s=%r' % (key, value)
+      for key, value in self.__dict__.iteritems()]
+    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+  def __eq__(self, other):
+    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+  def __ne__(self, other):
+    return not (self == other)
+
 class alter_partition_args:
   """
   Attributes:

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java Mon Mar 15 21:49:45 2010
@@ -152,7 +152,7 @@ public class HiveMetaStore extends Thrif
 
     /**
      * create default database if it doesn't exist
-     * 
+     *
      * @throws MetaException
      */
     private void createDefaultDB() throws MetaException {
@@ -193,6 +193,7 @@ public class HiveMetaStore extends Thrif
       return fb_status.ALIVE;
     }
 
+    @Override
     public void shutdown() {
       logStartFunction("Shutting down the object store...");
       try {
@@ -412,7 +413,7 @@ public class HiveMetaStore extends Thrif
 
     /**
      * Is this an external table?
-     * 
+     *
      * @param table
      *          Check if this table is external.
      * @return True if the table is external, otherwise false.
@@ -499,7 +500,7 @@ public class HiveMetaStore extends Thrif
       return part;
 
     }
-    
+
     public Partition append_partition(String dbName, String tableName,
         List<String> part_vals) throws InvalidObjectException,
         AlreadyExistsException, MetaException {
@@ -510,7 +511,7 @@ public class HiveMetaStore extends Thrif
           LOG.debug(part);
         }
       }
-      return append_partition_common(dbName, tableName, part_vals);      
+      return append_partition_common(dbName, tableName, part_vals);
     }
 
     public int add_partitions(List<Partition> parts) throws MetaException,
@@ -570,7 +571,7 @@ public class HiveMetaStore extends Thrif
         part.getSd().setLocation(partLocation.toString());
 
         // Check to see if the directory already exists before calling mkdirs()
-        // because if the file system is read-only, mkdirs will throw an  
+        // because if the file system is read-only, mkdirs will throw an
         // exception even if the directory already exists.
         if (!wh.isDir(partLocation)) {
           if (!wh.mkdirs(partLocation)) {
@@ -637,7 +638,7 @@ public class HiveMetaStore extends Thrif
       incrementCounter("drop_partition");
       logStartFunction("drop_partition", db_name, tbl_name);
       LOG.info("Partition values:" + part_vals);
-      
+
       return drop_partition_common(db_name, tbl_name, part_vals, deleteData);
     }
 
@@ -739,7 +740,7 @@ public class HiveMetaStore extends Thrif
     /**
      * Return the schema of the table. This function includes partition columns
      * in addition to the regular columns.
-     * 
+     *
      * @param db
      *          Name of the database
      * @param tableName
@@ -812,11 +813,11 @@ public class HiveMetaStore extends Thrif
       return toReturn;
     }
 
-    private List<String> getPartValsFromName(String dbName, String tblName, 
+    private List<String> getPartValsFromName(String dbName, String tblName,
         String partName) throws MetaException, InvalidObjectException {
       // Unescape the partition name
       LinkedHashMap<String, String> hm = Warehouse.makeSpecFromName(partName);
-      
+
       // getPartition expects partition values in a list. use info from the
       // table to put the partition column values in order
       Table t = getMS().getTable(dbName, tblName);
@@ -824,7 +825,7 @@ public class HiveMetaStore extends Thrif
         throw new InvalidObjectException(dbName + "." + tblName
             + " table not found");
       }
-      
+
       List<String> partVals = new ArrayList<String>();
       for(FieldSchema field : t.getPartitionKeys()) {
         String key = field.getName();
@@ -836,13 +837,13 @@ public class HiveMetaStore extends Thrif
       }
       return partVals;
     }
-    
+
     public Partition get_partition_by_name(String db_name, String tbl_name,
         String part_name) throws MetaException, NoSuchObjectException, TException {
       incrementCounter("get_partition_by_name");
       logStartFunction("get_partition_by_name: db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
-     
+
       List<String> partVals = null;
       try {
         partVals = getPartValsFromName(db_name, tbl_name, part_name);
@@ -850,7 +851,7 @@ public class HiveMetaStore extends Thrif
         throw new NoSuchObjectException(e.getMessage());
       }
       Partition p = getMS().getPartition(db_name, tbl_name, partVals);
-      
+
       if(p == null) {
         throw new NoSuchObjectException(db_name + "." + tbl_name
             + " partition (" + part_name + ") not found");
@@ -859,13 +860,13 @@ public class HiveMetaStore extends Thrif
     }
 
     public Partition append_partition_by_name(String db_name, String tbl_name,
-        String part_name) throws InvalidObjectException, 
+        String part_name) throws InvalidObjectException,
         AlreadyExistsException, MetaException, TException {
       incrementCounter("append_partition_by_name");
       logStartFunction("append_partition_by_name: db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
       List<String> partVals = getPartValsFromName(db_name, tbl_name, part_name);
-      
+
       return append_partition_common(db_name, tbl_name, partVals);
     }
 
@@ -876,16 +877,77 @@ public class HiveMetaStore extends Thrif
       incrementCounter("drop_partition_by_name");
       logStartFunction("drop_partition_by_name: db=" + db_name + " tbl="
           + tbl_name + " part=" + part_name);
-      
+
       List<String> partVals = null;
       try {
         partVals = getPartValsFromName(db_name, tbl_name, part_name);
       } catch (InvalidObjectException e) {
         throw new NoSuchObjectException(e.getMessage());
       }
-      
+
       return drop_partition_common(db_name, tbl_name, partVals, deleteData);
     }
+
+    @Override
+    public List<Partition> get_partitions_ps(String db_name, String tbl_name,
+        List<String> part_vals, short max_parts) throws MetaException,
+        TException {
+      incrementCounter("get_partitions_ps");
+      logStartFunction("get_partitions_ps", db_name, tbl_name);
+      List<Partition> parts = null;
+      List<Partition> matchingParts = new ArrayList<Partition>();
+
+      // This gets all the partitions and then filters based on the specified
+      // criteria. An alternative approach would be to get all the partition
+      // names, do the filtering on the names, and get the partition for each
+      // of the names that match.
+
+      try {
+        parts = get_partitions(db_name, tbl_name, (short) -1);
+      } catch (NoSuchObjectException e) {
+        throw new MetaException(e.getMessage());
+      }
+
+      for (Partition p : parts) {
+        if (MetaStoreUtils.pvalMatches(part_vals, p.getValues())) {
+          matchingParts.add(p);
+        }
+      }
+
+      return matchingParts;
+    }
+
+    @Override
+    public List<String> get_partition_names_ps(String db_name, String tbl_name,
+        List<String> part_vals, short max_parts) throws MetaException, TException {
+      incrementCounter("get_partition_names_ps");
+      logStartFunction("get_partitions_names_ps", db_name, tbl_name);
+      Table t;
+      try {
+        t = get_table(db_name, tbl_name);
+      } catch (NoSuchObjectException e) {
+        throw new MetaException(e.getMessage());
+      }
+
+      List<String> partNames = get_partition_names(db_name, tbl_name, max_parts);
+      List<String> filteredPartNames = new ArrayList<String>();
+
+      for(String name : partNames) {
+        LinkedHashMap<String, String> spec = Warehouse.makeSpecFromName(name);
+        List<String> vals = new ArrayList<String>();
+        // Since we are iterating through a LinkedHashMap, iteration should
+        // return the partition values in the correct order for comparison.
+        for (String val : spec.values()) {
+          vals.add(val);
+        }
+        if (MetaStoreUtils.pvalMatches(part_vals, vals)) {
+          filteredPartNames.add(name);
+        }
+      }
+
+      return filteredPartNames;
+    }
+
   }
 
   /**

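The comment above notes that get_partition_names_ps depends on the key ordering of the spec rebuilt from a partition name. A minimal, self-contained sketch of that step follows; buildSpec is an illustrative stand-in for Warehouse.makeSpecFromName (whose body is not reproduced here), and the class and sample names are not part of the commit.

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;

// Sketch of the name-to-values step used by get_partition_names_ps:
// a partition name becomes a key->value map whose iteration order is the
// partition-key order, so the values can be compared positionally against
// the partial specification.
public class PartitionNameOrdering {

  static LinkedHashMap<String, String> buildSpec(String partName) {
    LinkedHashMap<String, String> spec = new LinkedHashMap<String, String>();
    for (String kv : partName.split("/")) {
      int eq = kv.indexOf('=');
      spec.put(kv.substring(0, eq), kv.substring(eq + 1));
    }
    return spec;
  }

  public static void main(String[] args) {
    LinkedHashMap<String, String> spec = buildSpec("ds=2008-07-01/hr=14");

    // LinkedHashMap preserves insertion order, so this yields [2008-07-01, 14],
    // i.e. the values in partition-key order, ready to be matched.
    List<String> vals = new ArrayList<String>(spec.values());
    System.out.println(vals);
  }
}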
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java Mon Mar 15 21:49:45 2010
@@ -242,6 +242,10 @@ public class HiveMetaStoreClient impleme
     return client.append_partition(db_name, table_name, part_vals);
   }
 
+  public Partition appendPartition(String dbName, String tableName, String partName)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+    return client.append_partition_by_name(dbName, tableName, partName);
+  }
   /**
    * @param name
    * @param location_uri
@@ -326,6 +330,10 @@ public class HiveMetaStoreClient impleme
     return dropPartition(db_name, tbl_name, part_vals, true);
   }
 
+  public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+      throws NoSuchObjectException, MetaException, TException {
+    return client.drop_partition_by_name(dbName, tableName, partName, deleteData);
+  }
   /**
    * @param db_name
    * @param tbl_name
@@ -453,6 +461,12 @@ public class HiveMetaStoreClient impleme
     return client.get_partitions(db_name, tbl_name, max_parts);
   }
 
+  @Override
+  public List<Partition> listPartitions(String db_name, String tbl_name, List<String> part_vals,
+      short max_parts) throws NoSuchObjectException, MetaException, TException {
+    return client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+  }
+
   /**
    * @param name
    * @return the database
@@ -543,6 +557,12 @@ public class HiveMetaStoreClient impleme
     return client.get_partition_names(dbName, tblName, max);
   }
 
+  @Override
+  public List<String> listPartitionNames(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts) throws MetaException, TException {
+    return client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts);
+  }
+
   public void alter_partition(String dbName, String tblName, Partition newPart)
       throws InvalidOperationException, MetaException, TException {
     client.alter_partition(dbName, tblName, newPart);
@@ -585,20 +605,11 @@ public class HiveMetaStoreClient impleme
     return client.get_config_value(name, defaultValue);
   }
 
-  public Partition getPartitionByName(String db, String tableName, String partName)
+  public Partition getPartition(String db, String tableName, String partName)
       throws MetaException, TException, UnknownTableException, NoSuchObjectException {
     return client.get_partition_by_name(db, tableName, partName);
   }
 
-  public Partition appendPartitionByName(String dbName, String tableName, String partName) 
-      throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
-    return client.append_partition_by_name(dbName, tableName, partName);
-  }
-  
-  public boolean dropPartitionByName(String dbName, String tableName, String partName, boolean deleteData) 
-      throws NoSuchObjectException, MetaException, TException {
-    return client.drop_partition_by_name(dbName, tableName, partName, deleteData);
-  }
 
   private HiveMetaHook getHook(Table tbl) throws MetaException {
     if (hookLoader == null) {

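The two overloads added above are the public entry points for partial-specification listing. A minimal usage sketch, assuming the metastore classes are on the classpath; the HiveConf-based construction and the "default"/"comptbl" names are illustrative assumptions, not part of this commit.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Partition;

// Sketch: list partitions and partition names for a partial spec.
public class ListPartialSpecPartitions {
  public static void main(String[] args) throws Exception {
    HiveMetaStoreClient client =
        new HiveMetaStoreClient(new HiveConf(ListPartialSpecPartitions.class));

    // Empty strings act as wildcards: ds is fixed, hr is left unspecified.
    List<String> partialSpec = Arrays.asList("2008-07-01 14:13:12", "");

    // (short) -1 requests all matching partitions, as in the new tests.
    List<Partition> parts =
        client.listPartitions("default", "comptbl", partialSpec, (short) -1);
    List<String> names =
        client.listPartitionNames("default", "comptbl", partialSpec, (short) -1);

    System.out.println(parts.size() + " partitions, names: " + names);
  }
}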
Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java Mon Mar 15 21:49:45 2010
@@ -47,7 +47,7 @@ public interface IMetaStoreClient {
 
   /**
    * Drop the table.
-   * 
+   *
    * @param tableName
    *          The table to drop
    * @param deleteData
@@ -67,7 +67,7 @@ public interface IMetaStoreClient {
 
   /**
    * Drop the table.
-   * 
+   *
    * @param dbname
    *          The database for this table
    * @param tableName
@@ -93,7 +93,7 @@ public interface IMetaStoreClient {
 
   /**
    * Get a table object.
-   * 
+   *
    * @param tableName
    *          Name of the table to fetch.
    * @return An object representing the table.
@@ -109,7 +109,7 @@ public interface IMetaStoreClient {
 
   /**
    * Get a table object.
-   * 
+   *
    * @param dbName
    *          The database the table is located in.
    * @param tableName
@@ -141,9 +141,11 @@ public interface IMetaStoreClient {
       List<String> partVals) throws InvalidObjectException,
       AlreadyExistsException, MetaException, TException;
 
+  public Partition appendPartition(String tableName, String dbName, String name)
+      throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
   /**
    * Add a partition to the table.
-   * 
+   *
    * @param partition
    *          The partition to add
    * @return The partition added
@@ -183,9 +185,9 @@ public interface IMetaStoreClient {
    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
    *      java.lang.String, java.util.List)
    */
-  public Partition getPartitionByName(String dbName, String tblName,
+  public Partition getPartition(String dbName, String tblName,
       String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException;
-  
+
   /**
    * @param tbl_name
    * @param db_name
@@ -198,9 +200,15 @@ public interface IMetaStoreClient {
   public List<Partition> listPartitions(String db_name, String tbl_name,
       short max_parts) throws NoSuchObjectException, MetaException, TException;
 
+  public List<Partition> listPartitions(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
+
   public List<String> listPartitionNames(String db_name, String tbl_name,
       short max_parts) throws MetaException, TException;
 
+  public List<String> listPartitionNames(String db_name, String tbl_name,
+      List<String> part_vals, short max_parts) throws MetaException, TException;
+
   /**
    * @param tbl
    * @throws AlreadyExistsException
@@ -238,9 +246,12 @@ public interface IMetaStoreClient {
       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
       MetaException, TException;
 
+  public boolean dropPartition(String db_name, String tbl_name,
+      String name, boolean deleteData) throws NoSuchObjectException,
+      MetaException, TException;
   /**
    * updates a partition to new partition
-   * 
+   *
    * @param dbName
    *          database of the old partition
    * @param tblName

Modified: hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java (original)
+++ hadoop/hive/trunk/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreUtils.java Mon Mar 15 21:49:45 2010
@@ -22,6 +22,7 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
@@ -34,7 +35,6 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.JavaUtils;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.api.Constants;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
@@ -53,7 +53,6 @@ import org.apache.hadoop.hive.serde2.obj
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
 public class MetaStoreUtils {
@@ -64,12 +63,12 @@ public class MetaStoreUtils {
 
   /**
    * printStackTrace
-   * 
+   *
    * Helper function to print an exception stack trace to the log and not stderr
-   * 
+   *
    * @param e
    *          the exception
-   * 
+   *
    */
   static public void printStackTrace(Exception e) {
     for (StackTraceElement s : e.getStackTrace()) {
@@ -118,15 +117,15 @@ public class MetaStoreUtils {
 
   /**
    * recursiveDelete
-   * 
+   *
    * just recursively deletes a dir - you'd think Java would have something to
    * do this??
-   * 
+   *
    * @param f
    *          - the file/dir to delete
    * @exception IOException
    *              propogate f.delete() exceptions
-   * 
+   *
    */
   static public void recursiveDelete(File f) throws IOException {
     if (f.isDirectory()) {
@@ -142,9 +141,9 @@ public class MetaStoreUtils {
 
   /**
    * getDeserializer
-   * 
+   *
    * Get the Deserializer for a table given its name and properties.
-   * 
+   *
    * @param conf
    *          hadoop config
    * @param schema
@@ -152,9 +151,9 @@ public class MetaStoreUtils {
    * @return the Deserializer
    * @exception MetaException
    *              if any problems instantiating the Deserializer
-   * 
+   *
    *              todo - this should move somewhere into serde.jar
-   * 
+   *
    */
   static public Deserializer getDeserializer(Configuration conf,
       Properties schema) throws MetaException {
@@ -174,9 +173,9 @@ public class MetaStoreUtils {
 
   /**
    * getDeserializer
-   * 
+   *
    * Get the Deserializer for a table.
-   * 
+   *
    * @param conf
    *          - hadoop config
    * @param table
@@ -184,9 +183,9 @@ public class MetaStoreUtils {
    * @return the Deserializer
    * @exception MetaException
    *              if any problems instantiating the Deserializer
-   * 
+   *
    *              todo - this should move somewhere into serde.jar
-   * 
+   *
    */
   static public Deserializer getDeserializer(Configuration conf,
       org.apache.hadoop.hive.metastore.api.Table table) throws MetaException {
@@ -210,9 +209,9 @@ public class MetaStoreUtils {
 
   /**
    * getDeserializer
-   * 
+   *
    * Get the Deserializer for a partition.
-   * 
+   *
    * @param conf
    *          - hadoop config
    * @param partition
@@ -220,7 +219,7 @@ public class MetaStoreUtils {
    * @return the Deserializer
    * @exception MetaException
    *              if any problems instantiating the Deserializer
-   * 
+   *
    */
   static public Deserializer getDeserializer(Configuration conf,
       org.apache.hadoop.hive.metastore.api.Partition part,
@@ -289,10 +288,10 @@ public class MetaStoreUtils {
 
   /**
    * validateName
-   * 
+   *
    * Checks the name conforms to our standars which are: "[a-zA-z_0-9]+". checks
    * this is just characters and numbers and _
-   * 
+   *
    * @param name
    *          the name to validate
    * @return true or false depending on conformance
@@ -521,7 +520,7 @@ public class MetaStoreUtils {
 
   /**
    * Convert FieldSchemas to Thrift DDL + column names and column types
-   * 
+   *
    * @param structName
    *          The name of the table
    * @param fieldSchemas
@@ -665,7 +664,7 @@ public class MetaStoreUtils {
           org.apache.hadoop.hive.serde.Constants.SERIALIZATION_DDL,
           getDDLFromFieldSchema(tableName, sd.getCols()));
     }
-    
+
     String partString = "";
     String partStringSep = "";
     for (FieldSchema partKey : partitionKeys) {
@@ -736,7 +735,7 @@ public class MetaStoreUtils {
 
   /**
    * Catches exceptions that can't be handled and bundles them to MetaException
-   * 
+   *
    * @param e
    * @throws MetaException
    */
@@ -841,4 +840,27 @@ public class MetaStoreUtils {
     }
     return (table.getParameters().get(Constants.META_TABLE_STORAGE) != null);
   }
+
+  /**
+   * Returns true if partial has the same values as full for all values that
+   * aren't empty in partial.
+   */
+
+  public static boolean pvalMatches(List<String> partial, List<String> full) {
+    if(partial.size() != full.size()) {
+      return false;
+    }
+    Iterator<String> p = partial.iterator();
+    Iterator<String> f = full.iterator();
+
+    while(p.hasNext()) {
+      String pval = p.next();
+      String fval = f.next();
+
+      if (pval.length() != 0 && !pval.equals(fval)) {
+        return false;
+      }
+    }
+    return true;
+  }
 }
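A small driver showing the matching rule pvalMatches implements; a sketch assuming the metastore jar is on the classpath, with sample values taken from the new tests.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.MetaStoreUtils;

// Demonstrates pvalMatches semantics: an empty string in the partial spec
// matches any value at that position, and both lists must be the same length.
public class PvalMatchesDemo {
  public static void main(String[] args) {
    System.out.println(MetaStoreUtils.pvalMatches(
        Arrays.asList("2008-07-01", ""), Arrays.asList("2008-07-01", "14")));   // true
    System.out.println(MetaStoreUtils.pvalMatches(
        Arrays.asList("", "15"), Arrays.asList("2008-07-02", "15")));           // true
    System.out.println(MetaStoreUtils.pvalMatches(
        Arrays.asList("2008-07-01", "14"), Arrays.asList("2008-07-02", "14"))); // false
    System.out.println(MetaStoreUtils.pvalMatches(
        Arrays.asList("2008-07-01"), Arrays.asList("2008-07-01", "14")));       // false: size mismatch
  }
}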

Modified: hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java (original)
+++ hadoop/hive/trunk/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStore.java Mon Mar 15 21:49:45 2010
@@ -20,7 +20,9 @@ package org.apache.hadoop.hive.metastore
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import junit.framework.TestCase;
 
@@ -47,6 +49,7 @@ public class TestHiveMetaStore extends T
   private HiveMetaStoreClient client;
   private HiveConf hiveConf;
 
+  @Override
   protected void setUp() throws Exception {
     super.setUp();
     hiveConf = new HiveConf(this.getClass());
@@ -66,6 +69,7 @@ public class TestHiveMetaStore extends T
     }
   }
 
+  @Override
   protected void tearDown() throws Exception {
     try {
       super.tearDown();
@@ -80,7 +84,7 @@ public class TestHiveMetaStore extends T
   /**
    * tests create table and partition and tries to drop the table without
    * droppping the partition
-   * 
+   *
    * @throws Exception
    */
   public void testPartition() throws Exception {
@@ -91,6 +95,13 @@ public class TestHiveMetaStore extends T
       List<String> vals = new ArrayList<String>(2);
       vals.add("2008-07-01 14:13:12");
       vals.add("14");
+      List<String> vals2 = new ArrayList<String>(2);
+      vals2.add("2008-07-01 14:13:12");
+      vals2.add("15");
+      List<String> vals3 = new ArrayList<String>(2);
+      vals3.add("2008-07-02 14:13:12");
+      vals3.add("15");
 
       client.dropTable(dbName, tblName);
       client.dropDatabase(dbName);
@@ -144,25 +155,93 @@ public class TestHiveMetaStore extends T
       part.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
       part.getSd().setLocation(tbl.getSd().getLocation() + "/part1");
 
+      Partition part2 = new Partition();
+      part2.setDbName(dbName);
+      part2.setTableName(tblName);
+      part2.setValues(vals2);
+      part2.setParameters(new HashMap<String, String>());
+      part2.setSd(tbl.getSd());
+      part2.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part2.getSd().setLocation(tbl.getSd().getLocation() + "/part2");
+
+      Partition part3 = new Partition();
+      part3.setDbName(dbName);
+      part3.setTableName(tblName);
+      part3.setValues(vals3);
+      part3.setParameters(new HashMap<String, String>());
+      part3.setSd(tbl.getSd());
+      part3.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo());
+      part3.getSd().setLocation(tbl.getSd().getLocation() + "/part2");
+
       Partition retp = client.add_partition(part);
       assertNotNull("Unable to create partition " + part, retp);
+      Partition retp2 = client.add_partition(part2);
+      assertNotNull("Unable to create partition " + part2, retp2);
+      Partition retp3 = client.add_partition(part3);
+      assertNotNull("Unable to create partition " + part3, retp3);
 
-      Partition part2 = client.getPartition(dbName, tblName, part.getValues());
-      assertTrue("Partitions are not same", part.equals(part2));
+      Partition part_get = client.getPartition(dbName, tblName, part.getValues());
+      assertTrue("Partitions are not same", part.equals(part_get));
 
       String partName = "ds=2008-07-01 14%3A13%3A12/hr=14";
-      Partition part3 = client.getPartitionByName(dbName, tblName, partName);
-      assertTrue("Partitions are not the same", part.equals(part2));
-      
+      String part2Name = "ds=2008-07-01 14%3A13%3A12/hr=15";
+      String part3Name ="ds=2008-07-02 14%3A13%3A12/hr=15";
+
+      part_get = client.getPartition(dbName, tblName, partName);
+      assertTrue("Partitions are not the same", part.equals(part_get));
+
+      // Test partition listing with a partial spec - ds is specified but hr is not
+      List<String> partialVals = new ArrayList<String>();
+      partialVals.add(vals.get(0));
+      partialVals.add("");
+      Set<Partition> parts = new HashSet<Partition>();
+      parts.add(part);
+      parts.add(part2);
+
+      List<Partition> partial = client.listPartitions(dbName, tblName, partialVals,
+          (short) -1);
+      assertTrue("Should have returned 2 partitions", partial.size() == 2);
+      assertTrue("Not all parts returned", partial.containsAll(parts));
+
+      Set<String> partNames = new HashSet<String>();
+      partNames.add(partName);
+      partNames.add(part2Name);
+      List<String> partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+          (short) -1);
+      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+      // Test partition listing with a partial spec - hr is specified but ds is not
+      parts.clear();
+      parts.add(part2);
+      parts.add(part3);
+
+      partialVals.clear();
+      partialVals.add("");
+      partialVals.add(vals2.get(1));
+
+      partial = client.listPartitions(dbName, tblName, partialVals, (short) -1);
+      assertTrue("Should have returned 2 partitions", partial.size() == 2);
+      assertTrue("Not all parts returned", partial.containsAll(parts));
+
+      partNames.clear();
+      partNames.add(part2Name);
+      partNames.add(part3Name);
+      partialNames = client.listPartitionNames(dbName, tblName, partialVals,
+          (short) -1);
+      assertTrue("Should have returned 2 partition names", partialNames.size() == 2);
+      assertTrue("Not all part names returned", partialNames.containsAll(partNames));
+
+      // Verify escaped partition names don't return partitions
       boolean exceptionThrown = false;
       try {
         String badPartName = "ds=2008-07-01 14%3A13%3A12/hrs=14";
-        client.getPartitionByName(dbName, tblName, badPartName);
+        client.getPartition(dbName, tblName, badPartName);
       } catch(NoSuchObjectException e) {
         exceptionThrown = true;
       }
       assertTrue("Bad partition spec should have thrown an exception", exceptionThrown);
-      
+
       FileSystem fs = FileSystem.get(hiveConf);
       Path partPath = new Path(part2.getSd().getLocation());
 
@@ -172,15 +251,15 @@ public class TestHiveMetaStore extends T
       assertFalse(fs.exists(partPath));
 
       // Test append_partition_by_name
-      client.appendPartitionByName(dbName, tblName, partName);
+      client.appendPartition(dbName, tblName, partName);
       Partition part4 = client.getPartition(dbName, tblName, part.getValues());
       assertTrue("Append partition by name failed", part4.getValues().equals(vals));;
       Path part4Path = new Path(part4.getSd().getLocation());
       assertTrue(fs.exists(part4Path));
-      
+
       // Test drop_partition_by_name
-      assertTrue("Drop partition by name failed", 
-          client.dropPartitionByName(dbName, tblName, partName, true));
+      assertTrue("Drop partition by name failed",
+          client.dropPartition(dbName, tblName, partName, true));
       assertFalse(fs.exists(part4Path));
 
       // add the partition again so that drop table with a partition can be
@@ -202,10 +281,10 @@ public class TestHiveMetaStore extends T
       assertTrue(fs.exists(partPath));
       client.dropPartition(dbName, tblName, part.getValues(), true);
       assertTrue(fs.exists(partPath));
-      
+
       ret = client.dropDatabase(dbName);
       assertTrue("Unable to create the databse " + dbName, ret);
-      
+
     } catch (Exception e) {
       System.err.println(StringUtils.stringifyException(e));
       System.err.println("testPartition() failed.");

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Mon Mar 15 21:49:45 2010
@@ -52,7 +52,6 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
-import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.ql.DriverContext;
 import org.apache.hadoop.hive.ql.QueryPlan;
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
@@ -92,7 +91,7 @@ import org.apache.hadoop.hive.shims.Shim
 
 /**
  * DDLTask implementation.
- * 
+ *
  **/
 public class DDLTask extends Task<DDLWork> implements Serializable {
   private static final long serialVersionUID = 1L;
@@ -112,6 +111,7 @@ public class DDLTask extends Task<DDLWor
     this.conf = conf;
   }
 
+  @Override
   public int execute() {
 
     // Create the db
@@ -204,7 +204,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Add a partition to a table.
-   * 
+   *
    * @param db
    *          Database to add the partition to.
    * @param addPartitionDesc
@@ -250,12 +250,12 @@ public class DDLTask extends Task<DDLWor
       throw new HiveException("Cannot use ALTER TABLE on a non-native table");
     }
   }
-  
+
   /**
    * MetastoreCheck, see if the data in the metastore matches what is on the
    * dfs. Current version checks for tables and partitions that are either
    * missing on disk on in the metastore.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param msckDesc
@@ -334,7 +334,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Write the result of msck to a writer.
-   * 
+   *
    * @param result
    *          The result we're going to write
    * @param msg
@@ -368,7 +368,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Write a list of partitions to a file.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param showParts
@@ -389,15 +389,19 @@ public class DDLTask extends Task<DDLWor
       console.printError("Table " + tabName + " is not a partitioned table");
       return 1;
     }
-
-    parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
-        .getTableName(), Short.MAX_VALUE);
+    if (showParts.getPartSpec() != null) {
+      parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME,
+          tbl.getTableName(), showParts.getPartSpec(), (short) -1);
+    } else {
+      parts = db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME, tbl
+          .getTableName(), (short) -1);
+    }
 
     // write the results in the file
     try {
       Path resFile = new Path(showParts.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
       Iterator<String> iterParts = parts.iterator();
 
       while (iterParts.hasNext()) {
@@ -421,7 +425,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Write a list of the tables in the database to a file.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param showTbls
@@ -445,7 +449,7 @@ public class DDLTask extends Task<DDLWor
     try {
       Path resFile = new Path(showTbls.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
       SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
       Iterator<String> iterTbls = sortedTbls.iterator();
 
@@ -469,7 +473,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Write a list of the user defined functions to a file.
-   * 
+   *
    * @param showFuncs
    *          are the functions we're interested in.
    * @return Returns 0 when execution succeeds and above 0 if it fails.
@@ -491,7 +495,7 @@ public class DDLTask extends Task<DDLWor
     try {
       Path resFile = new Path(showFuncs.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
       SortedSet<String> sortedFuncs = new TreeSet<String>(funcs);
       Iterator<String> iterFuncs = sortedFuncs.iterator();
 
@@ -515,7 +519,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Shows a description of a function.
-   * 
+   *
    * @param descFunc
    *          is the function we are describing
    * @throws HiveException
@@ -527,7 +531,7 @@ public class DDLTask extends Task<DDLWor
     try {
       Path resFile = new Path(descFunc.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
 
       // get the function documentation
       Description desc = null;
@@ -577,7 +581,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Write the status of tables to a file.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param showTblStatus
@@ -617,7 +621,7 @@ public class DDLTask extends Task<DDLWor
     try {
       Path resFile = new Path(showTblStatus.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
 
       Iterator<Table> iterTables = tbls.iterator();
       while (iterTables.hasNext()) {
@@ -699,7 +703,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Write the description of a table to a file.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param descTbl
@@ -763,7 +767,7 @@ public class DDLTask extends Task<DDLWor
       }
       Path resFile = new Path(descTbl.getResFile());
       FileSystem fs = resFile.getFileSystem(conf);
-      DataOutput outStream = (DataOutput) fs.create(resFile);
+      DataOutput outStream = fs.create(resFile);
       Iterator<FieldSchema> iterCols = cols.iterator();
       while (iterCols.hasNext()) {
         // create a row per column
@@ -955,7 +959,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Alter a given table.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param alterTbl
@@ -1162,7 +1166,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Drop a given table.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param dropTbl
@@ -1267,7 +1271,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Create a new table.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param crtTbl
@@ -1322,14 +1326,14 @@ public class DDLTask extends Task<DDLWor
       Iterator<Entry<String, String>> iter = crtTbl.getMapProp().entrySet()
         .iterator();
       while (iter.hasNext()) {
-        Entry<String, String> m = (Entry<String, String>) iter.next();
+        Entry<String, String> m = iter.next();
         tbl.setSerdeParam(m.getKey(), m.getValue());
       }
     }
 
     /*
      * We use LazySimpleSerDe by default.
-     * 
+     *
      * If the user didn't specify a SerDe, and any of the columns are not simple
      * types, we will have to use DynamicSerDe instead.
      */
@@ -1423,7 +1427,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Create a new table like an existing table.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param crtTbl
@@ -1459,7 +1463,7 @@ public class DDLTask extends Task<DDLWor
 
   /**
    * Create a new view.
-   * 
+   *
    * @param db
    *          The database in question.
    * @param crtView
@@ -1503,6 +1507,7 @@ public class DDLTask extends Task<DDLWor
     return 0;
   }
 
+  @Override
   public int getType() {
     return StageType.DDL;
   }

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java Mon Mar 15 21:49:45 2010
@@ -19,12 +19,10 @@
 package org.apache.hadoop.hive.ql.metadata;
 
 import java.io.IOException;
-import java.net.URI;
 import java.util.AbstractMap;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -48,13 +46,9 @@ import org.apache.hadoop.hive.metastore.
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 import org.apache.hadoop.hive.ql.exec.Utilities;
-import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
-import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -64,7 +58,7 @@ import org.apache.thrift.TException;
  * The Hive class contains information about this instance of Hive. An instance
  * of Hive represents a set of data in a file system (usually HDFS) organized
  * for easy query processing
- * 
+ *
  */
 
 public class Hive {
@@ -93,12 +87,12 @@ public class Hive {
    * Gets hive object for the current thread. If one is not initialized then a
    * new one is created. If the new configuration is different in metadata conf
    * vars then a new one is created.
-   * 
+   *
    * @param c
    *          new Hive Configuration
    * @return Hive object for current thread
    * @throws HiveException
-   * 
+   *
    */
   public static Hive get(HiveConf c) throws HiveException {
     boolean needsRefresh = false;
@@ -118,7 +112,7 @@ public class Hive {
 
   /**
    * get a connection to metastore. see get(HiveConf) function for comments
-   * 
+   *
    * @param c
    *          new conf
    * @param needsRefresh
@@ -152,10 +146,10 @@ public class Hive {
 
   /**
    * Hive
-   * 
+   *
    * @param argFsRoot
    * @param c
-   * 
+   *
    */
   private Hive(HiveConf c) throws HiveException {
     conf = c;
@@ -171,7 +165,7 @@ public class Hive {
 
   /**
    * Creates the table metadata and the directory for the table data
-   * 
+   *
    * @param tableName
    *          name of the table
    * @param columns
@@ -195,7 +189,7 @@ public class Hive {
 
   /**
    * Creates the table metadata and the directory for the table data
-   * 
+   *
    * @param tableName
    *          name of the table
    * @param columns
@@ -249,7 +243,7 @@ public class Hive {
 
   /**
    * Updates the existing table metadata with the new metadata.
-   * 
+   *
    * @param tblName
    *          name of the existing table
    * @param newTbl
@@ -272,7 +266,7 @@ public class Hive {
 
   /**
    * Updates the existing table metadata with the new metadata.
-   * 
+   *
    * @param tblName
    *          name of the existing table
    * @param newTbl
@@ -296,7 +290,7 @@ public class Hive {
 
   /**
    * Creates the table with the given objects
-   * 
+   *
    * @param tbl
    *          a table object
    * @throws HiveException
@@ -307,7 +301,7 @@ public class Hive {
 
   /**
    * Creates the table with the given objects
-   * 
+   *
    * @param tbl
    *          a table object
    * @param ifNotExists
@@ -334,7 +328,7 @@ public class Hive {
   /**
    * Drops table along with the data in it. If the table doesn't exist then it
    * is a no-op
-   * 
+   *
    * @param dbName
    *          database where the table lives
    * @param tableName
@@ -348,7 +342,7 @@ public class Hive {
 
   /**
    * Drops the table.
-   * 
+   *
    * @param tableName
    * @param deleteData
    *          deletes the underlying data along with metadata
@@ -376,7 +370,7 @@ public class Hive {
 
   /**
    * Returns metadata of the table.
-   * 
+   *
    * @param dbName
    *          the name of the database
    * @param tableName
@@ -393,7 +387,7 @@ public class Hive {
 
   /**
    * Returns metadata of the table
-   * 
+   *
    * @param dbName
    *          the name of the database
    * @param tableName
@@ -409,7 +403,7 @@ public class Hive {
     if (tableName == null || tableName.equals("")) {
       throw new HiveException("empty table creation??");
     }
-    
+
     // Get the table from metastore
     org.apache.hadoop.hive.metastore.api.Table tTable = null;
     try {
@@ -423,7 +417,7 @@ public class Hive {
     } catch (Exception e) {
       throw new HiveException("Unable to fetch table " + tableName, e);
     }
-    
+
     // For non-views, we need to do some extra fixes
     if (!TableType.VIRTUAL_VIEW.toString().equals(tTable.getTableType())) {
       // Fix the non-printable chars
@@ -436,7 +430,7 @@ public class Hive {
               Integer.toString(b[0]));
         }
       }
-      
+
       // Use LazySimpleSerDe for MetadataTypedColumnsetSerDe.
       // NOTE: LazySimpleSerDe does not support tables with a single column of
       // col
@@ -452,9 +446,9 @@ public class Hive {
             org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
       }
     }
-    
+
     Table table = new Table(tTable);
-    
+
     table.checkValidity();
     return table;
   }
@@ -466,7 +460,7 @@ public class Hive {
   /**
    * returns all existing tables from default database which match the given
    * pattern. The matching occurs as per Java regular expressions
-   * 
+   *
    * @param tablePattern
    *          java re pattern
    * @return list of table names
@@ -480,7 +474,7 @@ public class Hive {
   /**
    * returns all existing tables from the given database which match the given
    * pattern. The matching occurs as per Java regular expressions
-   * 
+   *
    * @param database
    *          the database name
    * @param tablePattern
@@ -528,7 +522,7 @@ public class Hive {
    * the partition with the contents of loadPath. - If the partition does not
    * exist - one is created - files in loadPath are moved into Hive. But the
    * directory itself is not removed.
-   * 
+   *
    * @param loadPath
    *          Directory containing files to load into Table
    * @param tableName
@@ -597,7 +591,7 @@ public class Hive {
    * the contents of loadPath. - If table does not exist - an exception is
    * thrown - files in loadPath are moved into Hive. But the directory itself is
    * not removed.
-   * 
+   *
    * @param loadPath
    *          Directory containing files to load into Table
    * @param tableName
@@ -620,7 +614,7 @@ public class Hive {
 
   /**
    * Creates a partition.
-   * 
+   *
    * @param tbl
    *          table for which partition needs to be created
    * @param partSpec
@@ -636,7 +630,7 @@ public class Hive {
 
   /**
    * Creates a partition
-   * 
+   *
    * @param tbl
    *          table for which partition needs to be created
    * @param partSpec
@@ -673,7 +667,7 @@ public class Hive {
 
   /**
    * Returns partition metadata
-   * 
+   *
    * @param tbl
    *          the partition's table
    * @param partSpec
@@ -730,7 +724,7 @@ public class Hive {
 
   public List<String> getPartitionNames(String dbName, String tblName, short max)
       throws HiveException {
-    List names = null;
+    List<String> names = null;
     try {
       names = getMSC().listPartitionNames(dbName, tblName, max);
     } catch (Exception e) {
@@ -740,9 +734,25 @@ public class Hive {
     return names;
   }
 
+  public List<String> getPartitionNames(String dbName, String tblName,
+      Map<String, String> partSpec, short max) throws HiveException {
+    List<String> names = null;
+    Table t = getTable(dbName, tblName);
+
+    List<String> pvals = getPvals(t.getPartCols(), partSpec);
+
+    try {
+      names = getMSC().listPartitionNames(dbName, tblName, pvals, max);
+    } catch (Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw new HiveException(e);
+    }
+    return names;
+  }
+
   /**
    * get all the partitions that the table has
-   * 
+   *
    * @param tbl
    *          object for which partition is needed
    * @return list of partition objects
@@ -771,6 +781,54 @@ public class Hive {
     }
   }
 
+  private static List<String> getPvals(List<FieldSchema> partCols,
+      Map<String, String> partSpec) {
+    List<String> pvals = new ArrayList<String>();
+    for (FieldSchema field : partCols) {
+      String val = partSpec.get(field.getName());
+      if (val == null) {
+        val = "";
+      }
+      pvals.add(val);
+    }
+    return pvals;
+  }
+
+  /**
+   * get all the partitions of the table that match the given partial
+   * specification. Partition columns whose value can be anything should be
+   * specified as an empty string.
+   *
+   * @param tbl
+   *          object for which partitions are needed. Must be partitioned.
+   * @return list of partition objects
+   * @throws HiveException
+   */
+  public List<Partition> getPartitions(Table tbl, Map<String, String> partialPartSpec)
+      throws HiveException {
+    if (!tbl.isPartitioned()) {
+      throw new HiveException("Partition spec should only be supplied for a " +
+          "partitioned table");
+    }
+
+    List<String> partialPvals = getPvals(tbl.getPartCols(), partialPartSpec);
+
+    List<org.apache.hadoop.hive.metastore.api.Partition> partitions = null;
+    try {
+      partitions = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(),
+          partialPvals, (short) -1);
+    } catch (Exception e) {
+      throw new HiveException(e);
+    }
+
+    List<Partition> qlPartitions = new ArrayList<Partition>();
+    for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) {
+      qlPartitions.add(new Partition(tbl, p));
+    }
+
+    return qlPartitions;
+  }
+
   static private void checkPaths(FileSystem fs, FileStatus[] srcs, Path destf,
       boolean replace) throws HiveException {
     try {
@@ -849,7 +907,7 @@ public class Hive {
   /**
    * Replaces files in the partition with the new data set specified by srcf. Works
    * by moving files
-   * 
+   *
    * @param srcf
    *          Files to be moved. Leaf Directories or Globbed File Paths
    * @param destf
@@ -916,7 +974,7 @@ public class Hive {
   /**
    * Creates a metastore client. Currently it creates only a JDBC-based client, as
    * file-based store support has been removed.
-   * 
+   *
    * @returns a Meta Store Client
    * @throws HiveMetaException
    *           if a working client can't be created
@@ -951,7 +1009,7 @@ public class Hive {
   }
 
   /**
-   * 
+   *
    * @return the metastore client for the current thread
    * @throws MetaException
    */
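
A hedged usage sketch of the two partial-specification lookups added to Hive.java above, getPartitions(Table, Map) and the new getPartitionNames overload. The database and table names are illustrative; per the new javadoc, a partition column omitted from the map is sent to the metastore as an empty string, i.e. "any value":

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;

    public class PartialSpecSketch {
      public static void listByDs(HiveConf conf) throws HiveException {
        Hive db = Hive.get(conf);
        Table tbl = db.getTable("default", "srcpart"); // must be a partitioned table

        // Pin ds and leave hr unconstrained; getPvals() fills the missing hr with "".
        Map<String, String> partialSpec = new HashMap<String, String>();
        partialSpec.put("ds", "2008-04-08");

        List<Partition> parts = db.getPartitions(tbl, partialSpec);
        // (short) -1 follows the patch's own convention for "no limit".
        List<String> names =
            db.getPartitionNames("default", "srcpart", partialSpec, (short) -1);
      }
    }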

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Mon Mar 15 21:49:45 2010
@@ -273,7 +273,7 @@ public class DDLSemanticAnalyzer extends
   /**
    * Get the fully qualified name in the ast. e.g. the ast of the form ^(DOT
    * ^(DOT a b) c) will generate a name of the form a.b.c
-   * 
+   *
    * @param ast
    *          The AST from which the qualified name has to be extracted
    * @return String
@@ -289,7 +289,7 @@ public class DDLSemanticAnalyzer extends
 
   /**
    * Create a FetchTask for a given table and thrift ddl schema.
-   * 
+   *
    * @param tablename
    *          tablename
    * @param schema
@@ -346,7 +346,14 @@ public class DDLSemanticAnalyzer extends
   private void analyzeShowPartitions(ASTNode ast) throws SemanticException {
     ShowPartitionsDesc showPartsDesc;
     String tableName = unescapeIdentifier(ast.getChild(0).getText());
-    showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile());
+    List<Map<String, String>> partSpecs = getPartitionSpecs(ast);
+    // We can only have a single partition spec.
+    assert(partSpecs.size() <= 1);
+    Map<String, String> partSpec = null;
+    if (partSpecs.size() > 0) {
+      partSpec = partSpecs.get(0);
+    }
+    showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile(), partSpec);
     rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
         showPartsDesc), conf));
     setFetchTask(createFetchTask(showPartsDesc.getSchema()));
@@ -396,7 +403,7 @@ public class DDLSemanticAnalyzer extends
   /**
    * Add the task according to the parsed command tree. This is used for the CLI
    * command "SHOW FUNCTIONS;".
-   * 
+   *
    * @param ast
    *          The parsed command tree.
    * @throws SemanticException
@@ -418,7 +425,7 @@ public class DDLSemanticAnalyzer extends
   /**
    * Add the task according to the parsed command tree. This is used for the CLI
    * command "DESCRIBE FUNCTION;".
-   * 
+   *
    * @param ast
    *          The parsed command tree.
    * @throws SemanticException
@@ -508,7 +515,7 @@ public class DDLSemanticAnalyzer extends
   /**
    * Add one or more partitions to a table. Useful when the data has been copied
    * to the right location by some other process.
-   * 
+   *
    * @param ast
    *          The parsed command tree.
    * @throws SemanticException
@@ -568,7 +575,7 @@ public class DDLSemanticAnalyzer extends
   /**
    * Verify that the information in the metastore matches up with the data on
    * the fs.
-   * 
+   *
    * @param ast
    *          Query tree.
    * @throws SemanticException
@@ -593,7 +600,7 @@ public class DDLSemanticAnalyzer extends
 
   /**
    * Get the partition specs from the tree.
-   * 
+   *
    * @param ast
    *          Tree to extract partitions from.
    * @return A list of partition name to value mappings.
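
For reference, a hedged illustration of the single spec analyzeShowPartitions() now expects from getPartitionSpecs() when a PARTITION clause is present. The map below is the equivalent of the clause used in the srcpart test added later in this commit; the concrete Map implementation is illustrative:

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class ShowPartitionsSpecSketch {
      // Equivalent of the spec parsed from:
      //   SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12')
      public static Map<String, String> sampleSpec() {
        Map<String, String> spec = new LinkedHashMap<String, String>();
        spec.put("ds", "2008-04-08");
        spec.put("hr", "12");
        return spec;
      }
    }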

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/Hive.g Mon Mar 15 21:49:45 2010
@@ -391,7 +391,7 @@ showStatement
 @after { msgs.pop(); }
     : KW_SHOW KW_TABLES showStmtIdentifier?  -> ^(TOK_SHOWTABLES showStmtIdentifier?)
     | KW_SHOW KW_FUNCTIONS showStmtIdentifier?  -> ^(TOK_SHOWFUNCTIONS showStmtIdentifier?)
-    | KW_SHOW KW_PARTITIONS Identifier -> ^(TOK_SHOWPARTITIONS Identifier)
+    | KW_SHOW KW_PARTITIONS Identifier partitionSpec? -> ^(TOK_SHOWPARTITIONS Identifier partitionSpec?)
     | KW_SHOW KW_TABLE KW_EXTENDED ((KW_FROM|KW_IN) db_name=Identifier)? KW_LIKE showStmtIdentifier partitionSpec?
     -> ^(TOK_SHOW_TABLESTATUS showStmtIdentifier $db_name? partitionSpec?)
     ;

Modified: hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java?rev=923454&r1=923453&r2=923454&view=diff
==============================================================================
--- hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java (original)
+++ hadoop/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java Mon Mar 15 21:49:45 2010
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hive.ql.plan;
 
 import java.io.Serializable;
+import java.util.Map;
 
 import org.apache.hadoop.fs.Path;
 
@@ -31,6 +32,9 @@ public class ShowPartitionsDesc extends 
   private static final long serialVersionUID = 1L;
   String tabName;
   String resFile;
+  // Filter the partitions to show based on the supplied spec.
+  Map<String, String> partSpec;
+
   /**
    * name of the table whose partitions are to be listed.
    */
@@ -57,9 +61,11 @@ public class ShowPartitionsDesc extends 
    * @param resFile
    *          File to store the results in
    */
-  public ShowPartitionsDesc(String tabName, Path resFile) {
+  public ShowPartitionsDesc(String tabName, Path resFile,
+      Map<String, String> partSpec) {
     this.tabName = tabName;
     this.resFile = resFile.toString();
+    this.partSpec = partSpec;
   }
 
   /**
@@ -79,6 +85,22 @@ public class ShowPartitionsDesc extends 
   }
 
   /**
+   * @return the partition spec used to filter the partitions shown.
+   */
+  @Explain(displayName = "partSpec")
+  public Map<String, String> getPartSpec() {
+    return partSpec;
+  }
+
+  /**
+   * @param partSpec
+   *          the partition spec used to filter which partitions are listed
+   */
+  public void setPartSpec(Map<String, String> partSpec) {
+    this.partSpec = partSpec;
+  }
+
+  /**
    * @return the results file
    */
   @Explain(displayName = "result file", normalExplain = false)
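
A hedged sketch of how the execution side can consume the new partSpec field: fall back to the unfiltered listing when no PARTITION clause was given, otherwise use the partial-spec overload added to Hive.java above. The DDLTask change that actually does this is not shown in this part of the commit, so the branching, getTabName(), and MetaStoreUtils.DEFAULT_DATABASE_NAME are assumptions here, not quotes of the patch:

    import java.util.List;

    import org.apache.hadoop.hive.metastore.MetaStoreUtils;
    import org.apache.hadoop.hive.ql.metadata.Hive;
    import org.apache.hadoop.hive.ql.metadata.HiveException;
    import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc;

    public class ShowPartitionsExecSketch {
      // Uses the filtered overload only when a PARTITION clause was supplied.
      static List<String> partitionNames(Hive db, ShowPartitionsDesc showParts)
          throws HiveException {
        if (showParts.getPartSpec() == null) {
          return db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME,
              showParts.getTabName(), (short) -1);
        }
        return db.getPartitionNames(MetaStoreUtils.DEFAULT_DATABASE_NAME,
            showParts.getTabName(), showParts.getPartSpec(), (short) -1);
      }
    }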

Added: hadoop/hive/trunk/ql/src/test/queries/clientpositive/show_partitions.q
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/queries/clientpositive/show_partitions.q?rev=923454&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/queries/clientpositive/show_partitions.q (added)
+++ hadoop/hive/trunk/ql/src/test/queries/clientpositive/show_partitions.q Mon Mar 15 21:49:45 2010
@@ -0,0 +1,4 @@
+SHOW PARTITIONS srcpart;
+SHOW PARTITIONS srcpart PARTITION(hr='11');
+SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08');
+SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12');
\ No newline at end of file

Added: hadoop/hive/trunk/ql/src/test/results/clientpositive/show_partitions.q.out
URL: http://svn.apache.org/viewvc/hadoop/hive/trunk/ql/src/test/results/clientpositive/show_partitions.q.out?rev=923454&view=auto
==============================================================================
--- hadoop/hive/trunk/ql/src/test/results/clientpositive/show_partitions.q.out (added)
+++ hadoop/hive/trunk/ql/src/test/results/clientpositive/show_partitions.q.out Mon Mar 15 21:49:45 2010
@@ -0,0 +1,25 @@
+PREHOOK: query: SHOW PARTITIONS srcpart
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: SHOW PARTITIONS srcpart
+POSTHOOK: type: SHOWPARTITIONS
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+ds=2008-04-09/hr=11
+ds=2008-04-09/hr=12
+PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(hr='11')
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(hr='11')
+POSTHOOK: type: SHOWPARTITIONS
+ds=2008-04-08/hr=11
+ds=2008-04-09/hr=11
+PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08')
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08')
+POSTHOOK: type: SHOWPARTITIONS
+ds=2008-04-08/hr=11
+ds=2008-04-08/hr=12
+PREHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12')
+PREHOOK: type: SHOWPARTITIONS
+POSTHOOK: query: SHOW PARTITIONS srcpart PARTITION(ds='2008-04-08', hr='12')
+POSTHOOK: type: SHOWPARTITIONS
+ds=2008-04-08/hr=12


