http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/lens-server/src/main/java/org/apache/lens/server/metastore/CubeMetastoreServiceImpl.java
----------------------------------------------------------------------
diff --git 
a/lens-server/src/main/java/org/apache/lens/server/metastore/CubeMetastoreServiceImpl.java
 
b/lens-server/src/main/java/org/apache/lens/server/metastore/CubeMetastoreServiceImpl.java
index c5e1c14..646de8c 100644
--- 
a/lens-server/src/main/java/org/apache/lens/server/metastore/CubeMetastoreServiceImpl.java
+++ 
b/lens-server/src/main/java/org/apache/lens/server/metastore/CubeMetastoreServiceImpl.java
@@ -327,7 +327,7 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
       if (dimTable.getStorages() != null && !dimTable.getStorages().isEmpty()) 
{
         for (String storageName : dimTable.getStorages()) {
           XStorageTableElement tblElement = 
JAXBUtils.getXStorageTableFromHiveTable(
-            
msClient.getHiveTable(MetastoreUtil.getDimStorageTableName(dimTblName, 
storageName)));
+            
msClient.getHiveTable(MetastoreUtil.getFactOrDimtableStorageTableName(dimTblName,
 storageName)));
           tblElement.setStorageName(storageName);
           UpdatePeriod p = dimTable.getSnapshotDumpPeriods().get(storageName);
           if (p != null) {
@@ -491,7 +491,7 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
       for (String storageName : cft.getStorages()) {
         Set<UpdatePeriod> updatePeriods = 
cft.getUpdatePeriods().get(storageName);
         XStorageTableElement tblElement = 
JAXBUtils.getXStorageTableFromHiveTable(
-          msClient.getHiveTable(MetastoreUtil.getFactStorageTableName(fact, 
storageName)));
+          
msClient.getHiveTable(MetastoreUtil.getFactOrDimtableStorageTableName(fact, 
storageName)));
         tblElement.setStorageName(storageName);
         for (UpdatePeriod p : updatePeriods) {
           
tblElement.getUpdatePeriods().getUpdatePeriod().add(XUpdatePeriod.valueOf(p.name()));
@@ -605,7 +605,7 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
       CubeFactTable factTable = msClient.getFactTable(fact);
       Set<UpdatePeriod> updatePeriods = 
factTable.getUpdatePeriods().get(storageName);
       XStorageTableElement tblElement = 
JAXBUtils.getXStorageTableFromHiveTable(
-        msClient.getHiveTable(MetastoreUtil.getFactStorageTableName(fact, 
storageName)));
+        
msClient.getHiveTable(MetastoreUtil.getFactOrDimtableStorageTableName(fact, 
storageName)));
       tblElement.setStorageName(storageName);
       for (UpdatePeriod p : updatePeriods) {
         
tblElement.getUpdatePeriods().getUpdatePeriod().add(XUpdatePeriod.valueOf(p.name()));
@@ -625,7 +625,7 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
       CubeMetastoreClient msClient = getClient(sessionid);
       CubeDimensionTable dimTable = msClient.getDimensionTable(dimTblName);
       XStorageTableElement tblElement = 
JAXBUtils.getXStorageTableFromHiveTable(
-        msClient.getHiveTable(MetastoreUtil.getDimStorageTableName(dimTblName, 
storageName)));
+        
msClient.getHiveTable(MetastoreUtil.getFactOrDimtableStorageTableName(dimTblName,
 storageName)));
       tblElement.setStorageName(storageName);
       UpdatePeriod p = dimTable.getSnapshotDumpPeriods().get(storageName);
       if (p != null) {
@@ -696,17 +696,20 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
     try {
       acquire(sessionid);
       checkFactStorage(sessionid, fact, storageName);
-      String storageTableName = MetastoreUtil.getFactStorageTableName(fact, 
storageName);
-      List<Partition> parts = 
getClient(sessionid).getPartitionsByFilter(storageTableName, filter);
+      CubeMetastoreClient client = getClient(sessionid);
+      String storageTableName = 
MetastoreUtil.getFactOrDimtableStorageTableName(fact,
+        storageName);
+      List<Partition> parts = client.getPartitionsByFilter(storageTableName, 
filter);
+      List<String> timePartCols = 
client.getTimePartColNamesOfTable(storageTableName);
       if (parts != null) {
         List<XPartition> result = new ArrayList<XPartition>(parts.size());
         for (Partition p : parts) {
-          XPartition xp = JAXBUtils.xpartitionFromPartition(p);
+          XPartition xp = JAXBUtils.xpartitionFromPartition(p, timePartCols);
           result.add(xp);
         }
         return result;
       } else {
-        return new ArrayList<XPartition>();
+        return new ArrayList<>();
       }
     } catch (HiveException exc) {
       throw new LensException(exc);
@@ -767,13 +770,15 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
     try {
       acquire(sessionid);
       checkDimensionStorage(sessionid, dimension, storageName);
-      String storageTableName = MetastoreUtil.getDimStorageTableName(dimension,
+      CubeMetastoreClient client = getClient(sessionid);
+      String storageTableName = 
MetastoreUtil.getFactOrDimtableStorageTableName(dimension,
         storageName);
-      List<Partition> partitions = 
getClient(sessionid).getPartitionsByFilter(storageTableName, filter);
+      List<Partition> partitions = 
client.getPartitionsByFilter(storageTableName, filter);
+      List<String> timePartCols = 
client.getTimePartColNamesOfTable(storageTableName);
       if (partitions != null) {
         List<XPartition> result = new ArrayList<XPartition>(partitions.size());
         for (Partition p : partitions) {
-          XPartition xp = JAXBUtils.xpartitionFromPartition(p);
+          XPartition xp = JAXBUtils.xpartitionFromPartition(p, timePartCols);
           result.add(xp);
         }
         return result;
@@ -805,6 +810,46 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
   }
 
   @Override
+  public void updatePartition(LensSessionHandle sessionid, String tblName, 
String storageName,
+    XPartition xPartition) throws LensException {
+    try {
+      acquire(sessionid);
+      CubeMetastoreClient client = getClient(sessionid);
+      String storageTableName = 
MetastoreUtil.getFactOrDimtableStorageTableName(tblName, storageName);
+      Partition existingPartition = 
client.getPartitionByFilter(storageTableName,
+        
StorageConstants.getPartFilter(JAXBUtils.getFullPartSpecAsMap(xPartition)));
+      JAXBUtils.updatePartitionFromXPartition(existingPartition, xPartition);
+      client.updatePartition(tblName, storageName, existingPartition);
+    } catch (HiveException | ClassNotFoundException |InvalidOperationException 
| UnsupportedOperationException exc) {
+      throw new LensException(exc);
+    } finally {
+      release(sessionid);
+    }
+  }
+
+  @Override
+  public void updatePartitions(LensSessionHandle sessionid, String tblName, 
String storageName,
+    XPartitionList xPartitions) throws LensException {
+    try {
+      acquire(sessionid);
+      CubeMetastoreClient client = getClient(sessionid);
+      String storageTableName = 
MetastoreUtil.getFactOrDimtableStorageTableName(tblName, storageName);
+      List<Partition> partitionsToUpdate = new 
ArrayList<>(xPartitions.getPartition().size());
+      for (XPartition xPartition : xPartitions.getPartition()) {
+        Partition existingPartition = 
client.getPartitionByFilter(storageTableName,
+          
StorageConstants.getPartFilter(JAXBUtils.getFullPartSpecAsMap(xPartition)));
+        JAXBUtils.updatePartitionFromXPartition(existingPartition, xPartition);
+        partitionsToUpdate.add(existingPartition);
+      }
+      client.updatePartitions(tblName, storageName, partitionsToUpdate);
+    } catch (HiveException | ClassNotFoundException | 
InvalidOperationException exc) {
+      throw new LensException(exc);
+    } finally {
+      release(sessionid);
+    }
+  }
+
+  @Override
   public void addPartitionsToDimStorage(LensSessionHandle sessionid,
     String dimTblName, String storageName, XPartitionList partitions) throws 
LensException {
     try {
@@ -1411,7 +1456,7 @@ public class CubeMetastoreServiceImpl extends LensService 
implements CubeMetasto
         throw new BadRequestException("Can't get join chains. '"
           + tableName + "' is neither a cube nor a dimension");
       }
-      XJoinChains xJoinChains= new XJoinChains();
+      XJoinChains xJoinChains = new XJoinChains();
       List<XJoinChain> joinChains = xJoinChains.getJoinChain();
       if (chains != null) {
         for (JoinChain chain : chains) {

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/lens-server/src/main/java/org/apache/lens/server/metastore/JAXBUtils.java
----------------------------------------------------------------------
diff --git 
a/lens-server/src/main/java/org/apache/lens/server/metastore/JAXBUtils.java 
b/lens-server/src/main/java/org/apache/lens/server/metastore/JAXBUtils.java
index 811fd32..2d0eba2 100644
--- a/lens-server/src/main/java/org/apache/lens/server/metastore/JAXBUtils.java
+++ b/lens-server/src/main/java/org/apache/lens/server/metastore/JAXBUtils.java
@@ -33,14 +33,16 @@ import org.apache.lens.cube.metadata.ExprColumn.ExprSpec;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Order;
+import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.serde.serdeConstants;
+import org.apache.hadoop.mapred.InputFormat;
 
 import com.google.common.base.Optional;
-
+import com.google.common.collect.Maps;
 import lombok.extern.slf4j.Slf4j;
 
 /**
@@ -783,9 +785,8 @@ public final class JAXBUtils {
     return nonTimePartSpec;
   }
 
-  public static XPartition xpartitionFromPartition(Partition p) throws 
HiveException {
+  public static XPartition xpartitionFromPartition(Partition p, List<String> 
timePartCols) throws HiveException {
     XPartition xp = new XPartition();
-    xp.setFullPartitionSpec(new XPartSpec());
     xp.setPartitionParameters(new XProperties());
     xp.setSerdeParameters(new XProperties());
     xp.setName(p.getCompleteName());
@@ -795,18 +796,58 @@ public final class JAXBUtils {
     
xp.getPartitionParameters().getProperty().addAll(xPropertiesFromMap(p.getParameters()));
     String upParam = 
p.getParameters().get(MetastoreConstants.PARTITION_UPDATE_PERIOD);
     xp.setUpdatePeriod(XUpdatePeriod.valueOf(upParam));
-    for (Map.Entry<String, String> entry : p.getSpec().entrySet()) {
+    LinkedHashMap<String, String> partSpec = p.getSpec();
+    xp.setFullPartitionSpec(new XPartSpec());
+    for (Map.Entry<String, String> entry : partSpec.entrySet()) {
       XPartSpecElement e = new XPartSpecElement();
       e.setKey(entry.getKey());
       e.setValue(entry.getValue());
       xp.getFullPartitionSpec().getPartSpecElement().add(e);
     }
+    try {
+      xp.setTimePartitionSpec(new XTimePartSpec());
+      xp.setNonTimePartitionSpec(new XPartSpec());
+      for (Map.Entry<String, String> entry : partSpec.entrySet()) {
+        if (timePartCols.contains(entry.getKey())) {
+          XTimePartSpecElement timePartSpecElement = new 
XTimePartSpecElement();
+          timePartSpecElement.setKey(entry.getKey());
+          timePartSpecElement
+            
.setValue(getXMLGregorianCalendar(UpdatePeriod.valueOf(xp.getUpdatePeriod().name()).format().parse(
+              entry.getValue())));
+          
xp.getTimePartitionSpec().getPartSpecElement().add(timePartSpecElement);
+        } else {
+          XPartSpecElement partSpecElement = new XPartSpecElement();
+          partSpecElement.setKey(entry.getKey());
+          partSpecElement.setValue(entry.getValue());
+          
xp.getNonTimePartitionSpec().getPartSpecElement().add(partSpecElement);
+        }
+      }
+    } catch (java.text.ParseException exc) {
+      log.debug("can't form time part spec from " + partSpec, exc);
+      xp.setTimePartitionSpec(null);
+      xp.setNonTimePartitionSpec(null);
+    }
     
xp.setSerdeClassname(p.getTPartition().getSd().getSerdeInfo().getSerializationLib());
     xp.getSerdeParameters().getProperty().addAll(xPropertiesFromMap(
       p.getTPartition().getSd().getSerdeInfo().getParameters()));
     return xp;
   }
 
+  public static void updatePartitionFromXPartition(Partition partition, 
XPartition xp)
+    throws ClassNotFoundException, HiveException {
+    
partition.getParameters().putAll(mapFromXProperties(xp.getPartitionParameters()));
+    
partition.getTPartition().getSd().getSerdeInfo().setParameters(mapFromXProperties(xp.getSerdeParameters()));
+    partition.setLocation(xp.getLocation());
+    if (xp.getInputFormat() != null) {
+      partition.setInputFormatClass((Class<? extends InputFormat>) 
Class.forName(xp.getInputFormat()));
+    }
+    if (xp.getOutputFormat() != null) {
+      partition.setOutputFormatClass((Class<? extends HiveOutputFormat>) 
Class.forName(xp.getOutputFormat()));
+    }
+    partition.getParameters().put(MetastoreConstants.PARTITION_UPDATE_PERIOD, 
xp.getUpdatePeriod().name());
+    
partition.getTPartition().getSd().getSerdeInfo().setSerializationLib(xp.getSerdeClassname());
+  }
+
   public static StoragePartitionDesc storagePartSpecFromXPartition(
     XPartition xpart) {
     StoragePartitionDesc partDesc = new StoragePartitionDesc(
@@ -893,4 +934,20 @@ public final class JAXBUtils {
     xtable.setTableType(table.getTableType().name());
     return xtable;
   }
+
+  public static Map<String, String> getFullPartSpecAsMap(XPartition partition) 
{
+    Map<String, String> spec = Maps.newHashMap();
+    if (partition.getTimePartitionSpec() != null) {
+      for (XTimePartSpecElement timePartSpecElement : 
partition.getTimePartitionSpec().getPartSpecElement()) {
+        spec.put(timePartSpecElement.getKey(), 
UpdatePeriod.valueOf(partition.getUpdatePeriod().name()).format()
+          .format(getDateFromXML(timePartSpecElement.getValue())));
+      }
+    }
+    if (partition.getNonTimePartitionSpec() != null) {
+      for (XPartSpecElement partSpecElement : 
partition.getNonTimePartitionSpec().getPartSpecElement()) {
+        spec.put(partSpecElement.getKey(), partSpecElement.getValue());
+      }
+    }
+    return spec;
+  }
 }

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/lens-server/src/main/java/org/apache/lens/server/metastore/MetastoreResource.java
----------------------------------------------------------------------
diff --git 
a/lens-server/src/main/java/org/apache/lens/server/metastore/MetastoreResource.java
 
b/lens-server/src/main/java/org/apache/lens/server/metastore/MetastoreResource.java
index bfe4813..cb9516b 100644
--- 
a/lens-server/src/main/java/org/apache/lens/server/metastore/MetastoreResource.java
+++ 
b/lens-server/src/main/java/org/apache/lens/server/metastore/MetastoreResource.java
@@ -363,7 +363,7 @@ public class MetastoreResource {
    * @param cubeName  The cube name
    * @param cube      The {@link XCube} representation of the updated cube 
definition
    * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful. {@link APIResult} with
-   * state {@link Status#FAILED}, if udpate has failed
+   * state {@link Status#FAILED}, if update has failed
    */
   @PUT
   @Path("/cubes/{cubeName}")
@@ -870,7 +870,7 @@ public class MetastoreResource {
    * @param factName  name of the fact table
    * @param fact      The {@link XFactTable} representation of the updated 
fact table definition
    * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful. {@link APIResult} with
-   * state {@link Status#FAILED}, if udpate has failed
+   * state {@link Status#FAILED}, if update has failed
    */
   @PUT
   @Path("/facts/{factName}")
@@ -1108,6 +1108,34 @@ public class MetastoreResource {
     }
     return SUCCESS;
   }
+  /**
+   * Updates an existing partition for a storage of a fact table
+   *
+   * @param sessionid The sessionid in which user is working
+   * @param factName  fact table name
+   * @param storage   storage name
+   * @param partition {@link XPartition} representation of partition.
+   * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful.
+   * {@link APIResult} with state
+   * {@link Status#FAILED}, if update has failed
+   */
+  @PUT
+  @Path("/facts/{factName}/storages/{storage}/partition")
+  public APIResult updatePartitionOfFactStorage(@QueryParam("sessionid") 
LensSessionHandle sessionid,
+    @PathParam("factName") String factName,
+    @PathParam("storage") String storage,
+    XPartition partition) {
+    checkSessionId(sessionid);
+    checkNonNullArgs("Partition is null", partition);
+    try {
+      getSvc().updatePartition(sessionid, factName, storage, partition);
+    } catch (LensException exc) {
+      checkTableNotFound(exc, factName);
+      LOG.error("Error adding partition to storage of fact" + factName + ":" + 
storage, exc);
+      return new APIResult(Status.FAILED, exc.getMessage());
+    }
+    return SUCCESS;
+  }
 
   /**
    * Batch Add partitions for a storage of fact
@@ -1136,6 +1164,34 @@ public class MetastoreResource {
     }
     return SUCCESS;
   }
+  /**
+   * Batch Update partitions for a storage of fact
+   *
+   * @param sessionid  The sessionid in which user is working
+   * @param factName   fact table name
+   * @param storage    storage name
+   * @param partitions {@link XPartitionList} representation of partitions
+   * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful.
+   * {@link APIResult} with state
+   * {@link Status#FAILED}, if update has failed
+   */
+  @PUT
+  @Path("/facts/{factName}/storages/{storage}/partitions")
+  public APIResult updatePartitionsOfFactStorage(@QueryParam("sessionid") 
LensSessionHandle sessionid,
+    @PathParam("factName") String factName,
+    @PathParam("storage") String storage,
+    XPartitionList partitions) {
+    checkSessionId(sessionid);
+    checkNonNullArgs("Partition List is null", partitions);
+    try {
+      getSvc().updatePartitions(sessionid, factName, storage, partitions);
+    } catch (LensException exc) {
+      checkTableNotFound(exc, factName);
+      LOG.error("Error adding partition to storage of fact" + factName + ":" + 
storage, exc);
+      return new APIResult(Status.FAILED, exc.getMessage());
+    }
+    return SUCCESS;
+  }
 
   /**
    * Drop the partitions in the storage of a fact table, specified by exact 
values
@@ -1206,7 +1262,7 @@ public class MetastoreResource {
    * @param sessionid      The sessionid in which user is working
    * @param dimensionTable The {@link XDimensionTable} representation of the 
updated dim table definition
    * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful. {@link APIResult} with
-   * state {@link Status#FAILED}, if udpate has failed
+   * state {@link Status#FAILED}, if update has failed
    */
   @PUT
   @Path("/dimtables/{dimTableName}")
@@ -1481,6 +1537,33 @@ public class MetastoreResource {
     }
     return SUCCESS;
   }
+  /**
+   * Updates an existing partition for a storage of dimension
+   *
+   * @param sessionid    The sessionid in which user is working
+   * @param dimTableName dimension table name
+   * @param storage      storage name
+   * @param partition    {@link XPartition} representation of partition
+   * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful.
+   * {@link APIResult} with state
+   * {@link Status#FAILED}, if update has failed
+   */
+  @PUT
+  @Path("/dimtables/{dimTableName}/storages/{storage}/partition")
+  public APIResult updatePartitionOfDimStorage(@QueryParam("sessionid") 
LensSessionHandle sessionid,
+    @PathParam("dimTableName") String dimTableName,
+    @PathParam("storage") String storage,
+    XPartition partition) {
+    checkSessionId(sessionid);
+    checkNonNullArgs("Partition is null", partition);
+    try {
+      getSvc().updatePartition(sessionid, dimTableName, storage, partition);
+    } catch (LensException exc) {
+      LOG.error("Error adding partition to storage of dimension table " + 
dimTableName + ":" + storage, exc);
+      return new APIResult(Status.FAILED, exc.getMessage());
+    }
+    return SUCCESS;
+  }
 
   /**
    * Add new partitions for a storage of dimension
@@ -1508,6 +1591,33 @@ public class MetastoreResource {
     }
     return SUCCESS;
   }
+  /**
+   * Batch update existing partitions for a storage of dimension
+   *
+   * @param sessionid    The sessionid in which user is working
+   * @param dimTableName dimension table name
+   * @param storage      storage name
+   * @param partitions   {@link XPartitionList} representation of list of 
partitions
+   * @return {@link APIResult} with state {@link Status#SUCCEEDED}, if update 
was successful.
+   * {@link APIResult} with state
+   * {@link Status#FAILED}, if update has failed
+   */
+  @PUT
+  @Path("/dimtables/{dimTableName}/storages/{storage}/partitions")
+  public APIResult updatePartitionsOfDimStorage(@QueryParam("sessionid") 
LensSessionHandle sessionid,
+    @PathParam("dimTableName") String dimTableName,
+    @PathParam("storage") String storage,
+    XPartitionList partitions) {
+    checkSessionId(sessionid);
+    checkNonNullArgs("Partition list is null", partitions);
+    try {
+      getSvc().updatePartitions(sessionid, dimTableName, storage, partitions);
+    } catch (LensException exc) {
+      LOG.error("Error adding partition to storage of dimension table " + 
dimTableName + ":" + storage, exc);
+      return new APIResult(Status.FAILED, exc.getMessage());
+    }
+    return SUCCESS;
+  }
 
   /**
    * Get flattened list of columns reachable from a cube or a dimension

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/lens-server/src/test/java/org/apache/lens/server/metastore/TestMetastoreService.java
----------------------------------------------------------------------
diff --git 
a/lens-server/src/test/java/org/apache/lens/server/metastore/TestMetastoreService.java
 
b/lens-server/src/test/java/org/apache/lens/server/metastore/TestMetastoreService.java
index dc1dc1d..4eb52b9 100644
--- 
a/lens-server/src/test/java/org/apache/lens/server/metastore/TestMetastoreService.java
+++ 
b/lens-server/src/test/java/org/apache/lens/server/metastore/TestMetastoreService.java
@@ -18,6 +18,8 @@
  */
 package org.apache.lens.server.metastore;
 
+import static org.apache.lens.cube.metadata.UpdatePeriod.*;
+
 import static org.testng.Assert.*;
 
 import java.util.*;
@@ -27,19 +29,13 @@ import javax.ws.rs.NotFoundException;
 import javax.ws.rs.client.Entity;
 import javax.ws.rs.client.Invocation;
 import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.Application;
-import javax.ws.rs.core.GenericType;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.Response;
+import javax.ws.rs.core.*;
 import javax.xml.bind.JAXBElement;
 import javax.xml.datatype.DatatypeFactory;
 import javax.xml.datatype.XMLGregorianCalendar;
 
-import org.apache.lens.api.APIResult;
+import org.apache.lens.api.*;
 import org.apache.lens.api.APIResult.Status;
-import org.apache.lens.api.DateTime;
-import org.apache.lens.api.LensSessionHandle;
-import org.apache.lens.api.StringList;
 import org.apache.lens.api.metastore.*;
 import org.apache.lens.cube.metadata.*;
 import org.apache.lens.cube.metadata.ExprColumn.ExprSpec;
@@ -56,10 +52,7 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.log4j.BasicConfigurator;
 
 import org.glassfish.jersey.client.ClientConfig;
-import org.glassfish.jersey.media.multipart.FormDataBodyPart;
-import org.glassfish.jersey.media.multipart.FormDataContentDisposition;
-import org.glassfish.jersey.media.multipart.FormDataMultiPart;
-import org.glassfish.jersey.media.multipart.MultiPartFeature;
+import org.glassfish.jersey.media.multipart.*;
 import org.testng.Assert;
 import org.testng.annotations.AfterTest;
 import org.testng.annotations.BeforeTest;
@@ -652,7 +645,7 @@ public class TestMetastoreService extends LensJerseyTest {
       assertEquals(hcube.getDimAttributeByName("dim1").getDescription(), 
"first dimension");
       assertEquals(hcube.getDimAttributeByName("dim1").getDisplayString(), 
"Dimension1");
       assertEquals((((BaseDimAttribute) 
hcube.getDimAttributeByName("dim1")).getNumOfDistinctValues().get()),
-          Long.valueOf(2000));
+        Long.valueOf(2000));
 
       assertNotNull(hcube.getDimAttributeByName("testdim2col2"));
       
assertEquals(hcube.getDimAttributeByName("testdim2col2").getDisplayString(), 
"Chained Dimension");
@@ -663,12 +656,12 @@ public class TestMetastoreService extends LensJerseyTest {
       assertEquals(((ReferencedDimAtrribute) 
hcube.getDimAttributeByName("testdim2col2")).getChainName(), "chain1");
       assertEquals(((ReferencedDimAtrribute) 
hcube.getDimAttributeByName("testdim2col2")).getRefColumn(), "col2");
       assertEquals((((ReferencedDimAtrribute) 
hcube.getDimAttributeByName("testdim2col2"))
-          .getNumOfDistinctValues().get()), Long.valueOf(1000));
+        .getNumOfDistinctValues().get()), Long.valueOf(1000));
       assertEquals((((ReferencedDimAtrribute) 
hcube.getDimAttributeByName("testdim2col2"))
-          .getNumOfDistinctValues().get()), Long.valueOf(1000));
+        .getNumOfDistinctValues().get()), Long.valueOf(1000));
 
       assertEquals(((BaseDimAttribute) 
hcube.getDimAttributeByName("dim2")).getNumOfDistinctValues().isPresent(),
-          false);
+        false);
 
       assertNotNull(hcube.getMeasureByName("msr1"));
       assertEquals(hcube.getMeasureByName("msr1").getDescription(), "first 
measure");
@@ -1019,7 +1012,7 @@ public class TestMetastoreService extends LensJerseyTest {
     XDimension dimension = createDimension("testdim");
     APIResult result = target().path("metastore").path("dimensions")
       .queryParam("sessionid", lensSessionId).request(
-      
mediaType).post(Entity.xml(cubeObjectFactory.createXDimension(dimension)), 
APIResult.class);
+        
mediaType).post(Entity.xml(cubeObjectFactory.createXDimension(dimension)), 
APIResult.class);
     assertEquals(result.getStatus(), APIResult.Status.SUCCEEDED);
     XDimensionTable dt = createDimTable("testdim", dimTableName);
     
dt.getStorageTables().getStorageTable().add(createStorageTblElement("test", 
dimTableName, "HOURLY"));
@@ -1343,7 +1336,7 @@ public class TestMetastoreService extends LensJerseyTest {
       // add one more storage table
       createStorage("testAlterDimStorage");
       XStorageTableElement newStorage = 
createStorageTblElement("testAlterDimStorage", dt3.getTableName(),
-        (String[])null);
+        (String[]) null);
       newStorage.getTableDesc().setFieldDelimiter(":");
       dt3.getStorageTables().getStorageTable().add(newStorage);
       // Update the table
@@ -1446,8 +1439,8 @@ public class TestMetastoreService extends LensJerseyTest {
       CubeDimensionTable cdim = JAXBUtils.cubeDimTableFromDimTable(dimTable);
       assertTrue(cdim.getStorages().contains("test"));
       assertTrue(cdim.getStorages().contains("test2"));
-      assertEquals(cdim.getSnapshotDumpPeriods().get("test2"), 
UpdatePeriod.DAILY);
-      assertEquals(cdim.getSnapshotDumpPeriods().get("test"), 
UpdatePeriod.HOURLY);
+      assertEquals(cdim.getSnapshotDumpPeriods().get("test2"), DAILY);
+      assertEquals(cdim.getSnapshotDumpPeriods().get("test"), HOURLY);
 
       result = 
target().path("metastore/dimtables/").path(table).path("storages").path("test")
         .queryParam("sessionid", lensSessionId).request(mediaType)
@@ -1470,7 +1463,7 @@ public class TestMetastoreService extends LensJerseyTest {
       cdim = JAXBUtils.cubeDimTableFromDimTable(dimTable);
       assertFalse(cdim.getStorages().contains("test"));
       assertTrue(cdim.getStorages().contains("test2"));
-      assertEquals(cdim.getSnapshotDumpPeriods().get("test2"), 
UpdatePeriod.DAILY);
+      assertEquals(cdim.getSnapshotDumpPeriods().get("test2"), DAILY);
 
       // add another storage without dump period
       sTbl = createStorageTblElement("test3", table, (String[]) null);
@@ -1625,8 +1618,8 @@ public class TestMetastoreService extends LensJerseyTest {
       assertEquals(cf.getProperties().get("foo"), "bar");
       assertTrue(cf.getStorages().contains("S1"));
       assertTrue(cf.getStorages().contains("S2"));
-      
assertTrue(cf.getUpdatePeriods().get("S1").contains(UpdatePeriod.HOURLY));
-      assertTrue(cf.getUpdatePeriods().get("S2").contains(UpdatePeriod.DAILY));
+      assertTrue(cf.getUpdatePeriods().get("S1").contains(HOURLY));
+      assertTrue(cf.getUpdatePeriods().get("S2").contains(DAILY));
 
       // Finally, drop the fact table
       result = target().path("metastore").path("facts").path(table)
@@ -1714,8 +1707,8 @@ public class TestMetastoreService extends LensJerseyTest {
       CubeFactTable ucf = JAXBUtils.cubeFactFromFactTable(gotFact);
 
       assertEquals(ucf.weight(), 20.0);
-      
assertTrue(ucf.getUpdatePeriods().get("S2").contains(UpdatePeriod.MONTHLY));
-      
assertTrue(ucf.getUpdatePeriods().get("S3").contains(UpdatePeriod.DAILY));
+      assertTrue(ucf.getUpdatePeriods().get("S2").contains(MONTHLY));
+      assertTrue(ucf.getUpdatePeriods().get("S3").contains(DAILY));
 
       boolean foundC2 = false;
       for (FieldSchema fs : cf.getColumns()) {
@@ -1820,9 +1813,9 @@ public class TestMetastoreService extends LensJerseyTest {
       XFactTable gotFact = gotFactElement.getValue();
       CubeFactTable ucf = JAXBUtils.cubeFactFromFactTable(gotFact);
 
-      
assertTrue(ucf.getUpdatePeriods().get("S3").contains(UpdatePeriod.MONTHLY));
-      
assertTrue(ucf.getUpdatePeriods().get("S3").contains(UpdatePeriod.DAILY));
-      
assertTrue(ucf.getUpdatePeriods().get("S3").contains(UpdatePeriod.HOURLY));
+      assertTrue(ucf.getUpdatePeriods().get("S3").contains(MONTHLY));
+      assertTrue(ucf.getUpdatePeriods().get("S3").contains(DAILY));
+      assertTrue(ucf.getUpdatePeriods().get("S3").contains(HOURLY));
 
       // Drop new storage
       result = 
target().path("metastore/facts").path(table).path("storages").path("S3")
@@ -1849,7 +1842,7 @@ public class TestMetastoreService extends LensJerseyTest {
 
     XTimePartSpecElement timePart = 
cubeObjectFactory.createXTimePartSpecElement();
     timePart.setKey(timeDimension);
-    timePart.setValue(JAXBUtils.getXMLGregorianCalendar(partDate));
+    
timePart.setValue(JAXBUtils.getXMLGregorianCalendar(HOURLY.truncate(partDate)));
 
     return createPartition(cubeTableName, Arrays.asList(timePart));
   }
@@ -2008,6 +2001,12 @@ public class TestMetastoreService extends LensJerseyTest 
{
         .post(Entity.xml(cubeObjectFactory.createXPartition(xp)), 
APIResult.class);
       assertEquals(partAddResult.getStatus(), Status.SUCCEEDED);
 
+      xp.setLocation(xp.getLocation() + "/a/b/c");
+      APIResult partUpdateResult = 
target().path("metastore/facts/").path(table).path("storages/S2/partition")
+        .queryParam("sessionid", lensSessionId).request(mediaType)
+        .put(Entity.xml(cubeObjectFactory.createXPartition(xp)), 
APIResult.class);
+      assertEquals(partUpdateResult.getStatus(), Status.SUCCEEDED);
+
       JAXBElement<XPartitionList> partitionsElement = 
target().path("metastore/facts").path(table)
         .path("storages/S2/partitions")
         .queryParam("sessionid", lensSessionId).request(mediaType)
@@ -2016,7 +2015,16 @@ public class TestMetastoreService extends LensJerseyTest 
{
       XPartitionList partitions = partitionsElement.getValue();
       assertNotNull(partitions);
       assertEquals(partitions.getPartition().size(), 1);
-
+      XPartition readPartition = partitions.getPartition().get(0);
+      assertEquals(readPartition.getLocation(), xp.getLocation());
+      assertEquals(readPartition.getTimePartitionSpec(), 
xp.getTimePartitionSpec());
+      assertEquals(readPartition.getNonTimePartitionSpec(), 
xp.getNonTimePartitionSpec());
+      assertNotNull(readPartition.getFullPartitionSpec());
+      XTimePartSpecElement timePartSpec = 
readPartition.getTimePartitionSpec().getPartSpecElement().iterator().next();
+      XPartSpecElement fullPartSpec = 
readPartition.getFullPartitionSpec().getPartSpecElement().iterator().next();
+      assertEquals(timePartSpec.getKey(), fullPartSpec.getKey());
+      
assertEquals(UpdatePeriod.valueOf(xp.getUpdatePeriod().name()).format().format(JAXBUtils.getDateFromXML(
+        timePartSpec.getValue())), fullPartSpec.getValue());
       DateTime date =
         
target().path("metastore/cubes").path("testCube").path("latestdate").queryParam("timeDimension",
 "dt")
           .queryParam("sessionid", 
lensSessionId).request(mediaType).get(DateTime.class);
@@ -2063,7 +2071,7 @@ public class TestMetastoreService extends LensJerseyTest {
       assertEquals(partitions.getPartition().size(), 1);
 
       // Drop again by values
-      String[] val = new 
String[]{UpdatePeriod.HOURLY.format().format(partDate)};
+      String[] val = new String[]{HOURLY.format().format(partDate)};
       dropResult = 
target().path("metastore/facts").path(table).path("storages/S2/partition")
         .queryParam("values", StringUtils.join(val, ","))
         .queryParam("sessionid", lensSessionId).request(mediaType)
@@ -2112,6 +2120,12 @@ public class TestMetastoreService extends LensJerseyTest 
{
         .post(Entity.xml(cubeObjectFactory.createXPartition(xp)), 
APIResult.class);
       assertEquals(partAddResult.getStatus(), Status.SUCCEEDED);
 
+      xp.setLocation(xp.getLocation() + "/a/b/c");
+      APIResult partUpdateResult = 
target().path("metastore/dimtables/").path(table).path("storages/test/partition")
+        .queryParam("sessionid", lensSessionId).request(mediaType)
+        .put(Entity.xml(cubeObjectFactory.createXPartition(xp)), 
APIResult.class);
+      assertEquals(partUpdateResult.getStatus(), Status.SUCCEEDED);
+
       JAXBElement<XPartitionList> partitionsElement = 
target().path("metastore/dimtables").path(table)
         .path("storages/test/partitions")
         .queryParam("sessionid", lensSessionId).request(mediaType)
@@ -2121,6 +2135,37 @@ public class TestMetastoreService extends LensJerseyTest 
{
       assertNotNull(partitions);
       assertEquals(partitions.getPartition().size(), 2);
 
+      assertEquals(partitions.getPartition().get(0).getLocation(), 
xp.getLocation());
+      assertEquals(partitions.getPartition().get(1).getLocation(), 
xp.getLocation());
+
+      // one is latest partition.
+      assertTrue(partitions.getPartition().get(0).getTimePartitionSpec() == 
null
+        || partitions.getPartition().get(1).getTimePartitionSpec() == null);
+      XPartition postedPartition, latestPartition;
+      if (partitions.getPartition().get(0).getTimePartitionSpec() == null) {
+        postedPartition = partitions.getPartition().get(1);
+        latestPartition = partitions.getPartition().get(0);
+      } else {
+        postedPartition = partitions.getPartition().get(0);
+        latestPartition = partitions.getPartition().get(1);
+      }
+
+      assertEquals(postedPartition.getTimePartitionSpec(), 
xp.getTimePartitionSpec());
+      assertEquals(postedPartition.getNonTimePartitionSpec(), 
xp.getNonTimePartitionSpec());
+      assertNotNull(postedPartition.getFullPartitionSpec());
+
+      XTimePartSpecElement timePartSpec = 
postedPartition.getTimePartitionSpec().getPartSpecElement().iterator().next();
+      XPartSpecElement fullPartSpec = 
postedPartition.getFullPartitionSpec().getPartSpecElement().iterator().next();
+      assertEquals(timePartSpec.getKey(), fullPartSpec.getKey());
+      
assertEquals(UpdatePeriod.valueOf(xp.getUpdatePeriod().name()).format().format(JAXBUtils.getDateFromXML(
+        timePartSpec.getValue())), fullPartSpec.getValue());
+
+      assertNull(latestPartition.getTimePartitionSpec());
+      assertNull(latestPartition.getNonTimePartitionSpec());
+      
assertEquals(latestPartition.getFullPartitionSpec().getPartSpecElement().get(0).getValue(),
+        "latest");
+
+
       // Drop the partitions
       APIResult dropResult = 
target().path("metastore/dimtables").path(table).path("storages/test/partitions")
         .queryParam("sessionid", lensSessionId).request(mediaType)
@@ -2159,7 +2204,7 @@ public class TestMetastoreService extends LensJerseyTest {
       assertEquals(partitions.getPartition().size(), 2);
 
       // Drop again by values
-      String[] val = new 
String[]{UpdatePeriod.HOURLY.format().format(partDate)};
+      String[] val = new String[]{HOURLY.format().format(partDate)};
       dropResult = 
target().path("metastore/dimtables").path(table).path("storages/test/partition")
         .queryParam("values", StringUtils.join(val, ","))
         .queryParam("sessionid", lensSessionId).request(mediaType)

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/lens-storage-db/src/main/java/org/apache/lens/storage/db/DBStorage.java
----------------------------------------------------------------------
diff --git 
a/lens-storage-db/src/main/java/org/apache/lens/storage/db/DBStorage.java 
b/lens-storage-db/src/main/java/org/apache/lens/storage/db/DBStorage.java
index e6d4132..03b8d43 100644
--- a/lens-storage-db/src/main/java/org/apache/lens/storage/db/DBStorage.java
+++ b/lens-storage-db/src/main/java/org/apache/lens/storage/db/DBStorage.java
@@ -25,6 +25,7 @@ import org.apache.lens.cube.metadata.Storage;
 import org.apache.lens.cube.metadata.StoragePartitionDesc;
 
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 
 /**
@@ -158,6 +159,21 @@ public class DBStorage extends Storage {
   public void rollbackAddPartitions(List<StoragePartitionDesc> arg0) throws 
HiveException {
   }
 
+  @Override
+  public void preUpdatePartition(List<Partition> partitions) throws 
HiveException {
+
+  }
+
+  @Override
+  public void commitUpdatePartition(List<Partition> partitions) throws 
HiveException {
+
+  }
+
+  @Override
+  public void rollbackUpdatePartition(List<Partition> partitions) throws 
HiveException {
+
+  }
+
   /*
    * (non-Javadoc)
    *

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/lens-storage-db/src/test/java/org/apache/lens/storage/db/TestDBStorage.java
----------------------------------------------------------------------
diff --git 
a/lens-storage-db/src/test/java/org/apache/lens/storage/db/TestDBStorage.java 
b/lens-storage-db/src/test/java/org/apache/lens/storage/db/TestDBStorage.java
index 1e24828..92a0027 100644
--- 
a/lens-storage-db/src/test/java/org/apache/lens/storage/db/TestDBStorage.java
+++ 
b/lens-storage-db/src/test/java/org/apache/lens/storage/db/TestDBStorage.java
@@ -148,7 +148,7 @@ public class TestDBStorage {
 
     // Assert for storage tables
     for (String storage : storageTables.keySet()) {
-      String storageTableName = 
MetastoreUtil.getDimStorageTableName(dimTblName, storage);
+      String storageTableName = 
MetastoreUtil.getFactOrDimtableStorageTableName(dimTblName, storage);
       Assert.assertTrue(client.tableExists(storageTableName));
     }
   }

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/src/site/apt/admin/hivedriver-config.apt
----------------------------------------------------------------------
diff --git a/src/site/apt/admin/hivedriver-config.apt 
b/src/site/apt/admin/hivedriver-config.apt
index b668e16..80a3028 100644
--- a/src/site/apt/admin/hivedriver-config.apt
+++ b/src/site/apt/admin/hivedriver-config.apt
@@ -46,15 +46,11 @@ Hive driver configuration
 *--+--+---+--+
 
|11|lens.driver.hive.connection.class|org.apache.lens.driver.hive.EmbeddedThriftConnection|The
 connection class from HiveDriver to HiveServer. The default is an embedded 
connection which does not require a remote hive server. For connecting to a 
hiveserver end point, remote connection should be used. The possible values are 
org.apache.lens.driver.hive.EmbeddedThriftConnection and 
org.apache.lens.driver.hive.RemoteThriftConnection.|
 *--+--+---+--+
-|12|lens.driver.hive.hs2.connection.expiry.delay|600000|The idle time (in 
milliseconds) for expiring connection from hivedriver to HiveServer2|
+|12|lens.driver.hive.cost.calculator.class|org.apache.lens.cube.query.cost.FactPartitionBasedQueryCostCalculator|Cost
 calculator class. By default calculating cost through fact partitions.|
 *--+--+---+--+
-|13|lens.driver.hive.priority.partition.weight.daily|0.75|Weight of monthly 
partition in cost calculation|
+|13|lens.driver.hive.hs2.connection.expiry.delay|600000|The idle time (in 
milliseconds) for expiring connection from hivedriver to HiveServer2|
 *--+--+---+--+
-|14|lens.driver.hive.priority.partition.weight.hourly|1.0|Weight of monthly 
partition in cost calculation|
-*--+--+---+--+
-|15|lens.driver.hive.priority.partition.weight.monthly|0.5|Weight of monthly 
partition in cost calculation|
-*--+--+---+--+
-|16|lens.driver.hive.priority.ranges|VERY_HIGH,7.0,HIGH,30.0,NORMAL,90,LOW|Priority
 Ranges. The numbers are the costs of the query.                                
                                                                                
                                    \ |
+|14|lens.driver.hive.priority.ranges|VERY_HIGH,7.0,HIGH,30.0,NORMAL,90,LOW|Priority
 Ranges. The numbers are the costs of the query.                                
                                                                                
                                    \ |
 |  |                                |                                     |The 
cost is calculated based on partition weights and fact weights. The 
interpretation of the default config is:                                        
                                                    \ |
 |  |                                |                                     |    
                                                                                
                                                                                
                                        \ |
 |  |                                |                                     
|cost \<= 7\ \ \ \ \ \ \ \ \ \ \ :\ \ \ \ \ Priority = VERY_HIGH                
                                                                                
                                             \ |
@@ -70,4 +66,6 @@ Hive driver configuration
 |  |                                |                                     |One 
use case in range tuning can be that you never want queries to run with 
VERY_HIGH, assuming no other changes, you'll modify the value of this param in 
hivedriver-site.xml to be HIGH,30.0,NORMAL,90,LOW\ |
 |  |                                |                                     |via 
the configs, you can tune both the ranges and partition weights. this would 
give the end user more control.                                                 
                                              |
 *--+--+---+--+
+|15|lens.driver.hive.query.hook.class|org.apache.lens.server.api.driver.NoOpDriverQueryHook|The
 query hook class for hive driver. By default hook is No op. To add a hook, you 
should look at the default implementation and from there it'll be easy to 
derive what value can be added through a new hook|
+*--+--+---+--+
 The configuration parameters and their default values

http://git-wip-us.apache.org/repos/asf/incubator-lens/blob/7de74eb3/src/site/apt/user/cli.apt
----------------------------------------------------------------------
diff --git a/src/site/apt/user/cli.apt b/src/site/apt/user/cli.apt
index 353c171..63f2b3f 100644
--- a/src/site/apt/user/cli.apt
+++ b/src/site/apt/user/cli.apt
@@ -92,7 +92,7 @@ User CLI Commands
 *--+--+
 |close/bye|Releases all resources of the server session and exits the shell|
 *--+--+
-|debug [[--enable] To print all logs on cli for debugging purpose]|prints all 
class lelvel logs on cli for debugging purpose|
+|debug [[--enable] To print all logs on cli for debugging purpose]|prints all 
class level logs and verbose logs on cli for debugging purpose. 'debug false' 
turns off all class level logging and verbose level logging|
 *--+--+
 |get [--key] \<key\>|Fetches and prints session parameter specified with name 
<<<key>>> from lens server|
 *--+--+
@@ -106,7 +106,7 @@ User CLI Commands
 *--+--+
 |show params|Fetches and prints all session parameter from lens server|
 *--+--+
-|verbose [[--enable] Print the clilogger logs on cli]|Show cliLogger logs for 
command|
+|verbose [[--enable] Print the clilogger logs on cli]|Show cliLogger logs on 
cli. 'verbose false' turns off the cliLogger logs on console|
 *--+--+
   <<Lens Connection Commands>>
 
@@ -121,7 +121,7 @@ User CLI Commands
 *--+--+
 |create database [--db] \<database-name\> [--ignoreIfExists 
\<ignore-if-exists\>]|create a database with specified name. if 
<<<ignore-if-exists>>> is true, create will not be tried if already exists. 
Default is false|
 *--+--+
-|drop database [--db] \<database-name\>|drop a database with specified name|
+|drop database [--db] \<database-name\> [--cascade ]|drop a database with 
specified name|
 *--+--+
 |show databases|displays list of all databases|
 *--+--+
@@ -237,6 +237,10 @@ User CLI Commands
 *--+--+
 |fact timelines [--fact_name] \<fact_name\> [--storage_name \<storage_name\>] 
[--update_period \<update_period\>] [--time_dimension \<time_dimension\>]|get 
timelines for fact. Can optionally specify storage, update period and time 
dimension to filter by. Instead of time dimension, partition column can be 
directly passed as <<<time_dimension>>>|
 *--+--+
+|fact update partitions [--fact_name] \<fact_name\> [--storage_name] 
\<storage_name\> [--path] \<partition-list-spec-path\>|update multiple 
partition to fact <<<fact_name>>>'s storage <<<storage_name>>>, reading 
partition list spec from <<<partition-list-spec-path>>>|
+*--+--+
+|fact update single-partition [--fact_name] \<fact_name\> [--storage_name] 
\<storage_name\> [--path] \<partition-spec-path\>|update single partition to 
fact <<<fact_name>>>'s storage <<<storage_name>>>, reading spec from 
<<<partition-spec-path>>>|
+*--+--+
 |show facts [[--cube_name] \<cube_name\>]|display list of fact tables in 
current database. If optional <<<cube_name>>> is supplied, only facts belonging 
to cube <<<cube_name>>> will be displayed|
 *--+--+
 |update fact [--fact_name] \<fact_name\> [--path] \<path-to-fact-spec\>|update 
fact <<<fact_name>>> taking spec from <<<path-to-fact-spec>>>|
@@ -274,6 +278,10 @@ User CLI Commands
 *--+--+
 |dimtable list storages [--dimtable_name] \<dimtable_name\>|display list of 
storage associated to dimtable <<<dimtable_name>>>|
 *--+--+
+|dimtable update partitions [--dimtable_name] \<dimtable_name\> 
[--storage_name] \<storage_name\> [--path] \<partition-list-spec-path\>|update 
multiple partition to dimtable <<<dimtable_name>>>'s storage 
<<<storage_name>>>, reading partition list spec from 
<<<partition-list-spec-path>>>|
+*--+--+
+|dimtable update single-partition [--dimtable_name] \<dimtable_name\> 
[--storage_name] \<storage_name\> [--path] \<partition-spec-path\>|update 
single partition to dimtable <<<dimtable_name>>>'s storage <<<storage_name>>>, 
reading spec from <<<partition-spec-path>>>|
+*--+--+
 |drop dimtable [--dimtable_name] \<dimtable_name\> [--cascade 
\<cascade\>]|drop dimtable <<<dimtable_name>>>.  If <<<cascade>>> is true, all 
the storage tables associated with the dimtable <<<dimtable_name>>> are also 
dropped. By default <<<cascade>>> is false|
 *--+--+
 |show dimtables [[--dimension_name] \<dimension_name\>]|display list of 
dimtables in current database. If optional <<<dimension_name>>> is supplied, 
only facts belonging to dimension <<<dimension_name>>> will be displayed|


Reply via email to