http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/FileFormatProxy.java
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.metastore;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.Metastore.SplitInfos;
import org.apache.hadoop.hive.ql.io.sarg.SearchArgument;

/**
 * Same as PartitionExpressionProxy, but for file format specific methods for metadata cache.
 */
public interface FileFormatProxy {

  /**
   * Applies SARG to file metadata, and produces some result for this file.
   *
   * @param sarg SARG
   * @param fileMetadata File metadata from metastore cache.
   * @return The result to return to client for this file, or null if file is eliminated.
   * @throws IOException if the cached metadata cannot be read or parsed.
   */
  SplitInfos applySargToMetadata(SearchArgument sarg, ByteBuffer fileMetadata) throws IOException;

  /**
   * Reads the metadata of a single file so it can be cached in the metastore.
   *
   * @param fs The filesystem of the file.
   * @param path The file path.
   * @param addedVals Output parameter; additional column values for columns returned by
   *                  getAddedColumnsToCache to cache in MS.
   * @return The ORC file metadata for a given file.
   * @throws IOException if the file cannot be read.
   */
  ByteBuffer getMetadataToCache(
      FileSystem fs, Path path, ByteBuffer[] addedVals) throws IOException;

  /**
   * @return Additional column names to cache in MS for this format.
   */
  ByteBuffer[] getAddedColumnsToCache();

  /**
   * @param metadata File metadata entries.
   * @return Additional values for columns returned by getAddedColumnsToCache to cache in MS,
   *         one entry per respective input metadata entry.
   */
  ByteBuffer[][] getAddedValuesToCache(List<ByteBuffer> metadata);

}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaException.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaException.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaException.java new file mode 100644 index 0000000..5bd5a70 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaException.java @@ -0,0 +1,42 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +public class HiveMetaException extends Exception { + + public HiveMetaException() { + // TODO Auto-generated constructor stub + } + + public HiveMetaException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public HiveMetaException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } + + public HiveMetaException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java new file mode 100644 index 0000000..2534fa2 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; + +/** + * HiveMetaHook defines notification methods which are invoked as part + * of transactions against the metastore, allowing external catalogs + * such as HBase to be kept in sync with Hive's metastore. + * + *<p> + * + * Implementations can use {@link MetaStoreUtils#isExternalTable} to + * distinguish external tables from managed tables. + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface HiveMetaHook { + /** + * Called before a new table definition is added to the metastore + * during CREATE TABLE. + * + * @param table new table definition + */ + public void preCreateTable(Table table) + throws MetaException; + + /** + * Called after failure adding a new table definition to the metastore + * during CREATE TABLE. + * + * @param table new table definition + */ + public void rollbackCreateTable(Table table) + throws MetaException; + + /** + * Called after successfully adding a new table definition to the metastore + * during CREATE TABLE. + * + * @param table new table definition + */ + public void commitCreateTable(Table table) + throws MetaException; + + /** + * Called before a table definition is removed from the metastore + * during DROP TABLE. + * + * @param table table definition + */ + public void preDropTable(Table table) + throws MetaException; + + /** + * Called after failure removing a table definition from the metastore + * during DROP TABLE. + * + * @param table table definition + */ + public void rollbackDropTable(Table table) + throws MetaException; + + /** + * Called after successfully removing a table definition from the metastore + * during DROP TABLE. 
+ * + * @param table table definition + * + * @param deleteData whether to delete data as well; this should typically + * be ignored in the case of an external table + */ + public void commitDropTable(Table table, boolean deleteData) + throws MetaException; +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java new file mode 100644 index 0000000..1cdae9b --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHookLoader.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; + +/** + * HiveMetaHookLoader is responsible for loading a {@link HiveMetaHook} + * for a given table. 
+ */ +public interface HiveMetaHookLoader { + /** + * Loads a hook for the specified table. + * + * @param tbl table of interest + * + * @return hook, or null if none registered + */ + public HiveMetaHook getHook(Table tbl) throws MetaException; +} + +// End HiveMetaHookLoader.java http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java new file mode 100644 index 0000000..8322cb8 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreFsImpl.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */

package org.apache.hadoop.hive.metastore;

import java.io.FileNotFoundException;

import org.apache.hadoop.hive.metastore.utils.FileUtils;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.metastore.api.MetaException;

/**
 * Default {@code MetaStoreFS} implementation: deletes metastore-managed
 * directories by moving them to the filesystem trash (or purging).
 */
public class HiveMetaStoreFsImpl implements MetaStoreFS {

  public static final Logger LOG = LoggerFactory
      .getLogger("hive.metastore.hivemetastoreFsimpl");

  /**
   * Deletes a directory via {@link FileUtils#moveToTrash}, verifying the path
   * is actually gone afterwards.
   *
   * NOTE(review): the {@code recursive} parameter is not consulted here —
   * deletion semantics are delegated entirely to FileUtils.moveToTrash; confirm
   * this is intentional.
   *
   * @param fs filesystem containing the directory
   * @param f directory to delete
   * @param recursive unused in this implementation (see note above)
   * @param ifPurge if true, skip the trash and delete permanently
   * @param conf configuration used to locate the trash
   * @return true if the directory was removed (or never existed); false is
   *         unreachable in practice because the failure path throws
   * @throws MetaException if the directory still exists after the move, or on
   *         any other failure (wrapped by logAndThrowMetaException)
   */
  @Override
  public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
      boolean ifPurge, Configuration conf) throws MetaException {
    try {
      FileUtils.moveToTrash(fs, f, conf, ifPurge);
      // moveToTrash does not guarantee removal; double-check before reporting success.
      if (fs.exists(f)) {
        throw new MetaException("Unable to delete directory: " + f);
      }
      return true;
    } catch (FileNotFoundException e) {
      return true; // ok even if there is no data
    } catch (Exception e) {
      // Logs and rethrows as MetaException; control never reaches the return below.
      MetaStoreUtils.logAndThrowMetaException(e);
    }
    return false;
  }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IExtrapolatePartStatus.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore;

import java.util.HashMap;
import java.util.Map;

/**
 * Contract for extrapolating a column statistic from partial per-partition
 * statistics. Also hosts the lookup tables mapping Hive column types to the
 * relevant statistic columns, value types, and aggregation functions.
 *
 * NOTE(review): interface fields are implicitly public static final, but the
 * arrays and the HashMap below are still mutable containers shared by all
 * implementations.
 */
public interface IExtrapolatePartStatus {
  /**
   * The sequence of colStatNames. Positions in this array are the indexes
   * referenced by indexMaps, colStatTypes, and aggrTypes below.
   */
  static String[] colStatNames = new String[] { "LONG_LOW_VALUE", "LONG_HIGH_VALUE",
      "DOUBLE_LOW_VALUE", "DOUBLE_HIGH_VALUE", "BIG_DECIMAL_LOW_VALUE", "BIG_DECIMAL_HIGH_VALUE",
      "NUM_NULLS", "NUM_DISTINCTS", "AVG_COL_LEN", "MAX_COL_LEN", "NUM_TRUES", "NUM_FALSES",
      "AVG_NDV_LONG", "AVG_NDV_DOUBLE", "AVG_NDV_DECIMAL", "SUM_NUM_DISTINCTS" };

  /**
   * The indexes (into colStatNames) of the statistics relevant to each Hive
   * column type; "default" covers types without a dedicated entry.
   *
   * NOTE(review): anonymous-subclass ("double-brace") initialization is used
   * because interfaces cannot contain static initializer blocks; it does,
   * however, create an extra class and keep the map mutable.
   */
  static HashMap<String, Integer[]> indexMaps = new HashMap<String, Integer[]>() {
    {
      put("bigint", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("int", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("smallint", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("tinyint", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("date", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("timestamp", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("long", new Integer[] { 0, 1, 6, 7, 12, 15 });
      put("double", new Integer[] { 2, 3, 6, 7, 13, 15 });
      put("float", new Integer[] { 2, 3, 6, 7, 13, 15 });
      put("varchar", new Integer[] { 8, 9, 6, 7, 15 });
      put("char", new Integer[] { 8, 9, 6, 7, 15 });
      put("string", new Integer[] { 8, 9, 6, 7, 15 });
      put("boolean", new Integer[] { 10, 11, 6, 15 });
      put("binary", new Integer[] { 8, 9, 6, 15 });
      put("decimal", new Integer[] { 4, 5, 6, 7, 14, 15 });
      put("default", new Integer[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 15 });
    }
  };

  /**
   * The sequence of colStatTypes.
   */
  static enum ColStatType {
    Long, Double, Decimal
  }

  // Value type of each statistic in colStatNames, position for position.
  static ColStatType[] colStatTypes = new ColStatType[] { ColStatType.Long, ColStatType.Long,
      ColStatType.Double, ColStatType.Double, ColStatType.Decimal, ColStatType.Decimal,
      ColStatType.Long, ColStatType.Long, ColStatType.Double, ColStatType.Long, ColStatType.Long,
      ColStatType.Long, ColStatType.Double, ColStatType.Double, ColStatType.Double,
      ColStatType.Long };

  /**
   * The sequence of aggregation function on colStats.
   */
  static enum AggrType {
    Min, Max, Sum, Avg
  }

  // Aggregation applied to each statistic in colStatNames, position for position.
  static AggrType[] aggrTypes = new AggrType[] { AggrType.Min, AggrType.Max, AggrType.Min,
      AggrType.Max, AggrType.Min, AggrType.Max, AggrType.Sum, AggrType.Max, AggrType.Max,
      AggrType.Max, AggrType.Sum, AggrType.Sum, AggrType.Avg, AggrType.Avg, AggrType.Avg,
      AggrType.Sum };

  /**
   * Extrapolates a statistic value from the partitions that have it.
   *
   * @param min (value, partition-name) pair holding the smallest observed value
   * @param max (value, partition-name) pair holding the largest observed value
   * @param colStatIndex index into colStatNames of the statistic being extrapolated
   * @param indexMap partition name to ordinal position of each available partition
   * @return the extrapolated value; runtime type depends on colStatTypes[colStatIndex]
   */
  public Object extrapolate(Object[] min, Object[] max, int colStatIndex,
      Map<String, Integer> indexMap);

}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore;

import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

/**
 * Server-side handler interface for the metastore Thrift service: the full
 * Thrift-generated {@code ThriftHiveMetastore.Iface} plus Hadoop configuration
 * support and an explicit initialization step.
 */
public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {

  /**
   * Initializes the handler after construction/configuration.
   *
   * @throws MetaException if initialization fails
   */
  void init() throws MetaException;
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreSchemaInfo.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hive.metastore;


import java.sql.Connection;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper;

/**
 * Defines the methods which must be implemented for the schema tool to support metastore
 * schema upgrades. The configuration hive.metastore.schema.info.class is used to create instances
 * of this type by SchemaTool.
 *
 * Instances of this interface should be created using MetaStoreSchemaInfoFactory class which uses
 * a two-String-argument constructor to instantiate the implementations of this interface.
 */
@InterfaceAudience.Private
public interface IMetaStoreSchemaInfo {
  String SQL_FILE_EXTENSION = ".sql";

  /***
   * Get the list of sql scripts required to upgrade from the given version to current.
   *
   * @param fromVersion the schema version to upgrade from
   * @return names of the upgrade scripts, in the order they must be run
   * @throws HiveMetaException if no upgrade path exists from the given version
   */
  List<String> getUpgradeScripts(String fromVersion) throws HiveMetaException;

  /***
   * Get the name of the script to initialize the schema for given version.
   *
   * @param toVersion Target version. If it's null, then the current server version is used
   * @return name of the initialization script
   * @throws HiveMetaException if no initialization script exists for the version
   */
  String generateInitFileName(String toVersion) throws HiveMetaException;

  /**
   * Find the directory of metastore scripts.
   *
   * @return the path of directory where the sql scripts are
   */
  String getMetaStoreScriptDir();

  /**
   * Get the pre-upgrade script for a given script name. Schema tool runs the pre-upgrade scripts
   * returned by this method before running any upgrade scripts. These scripts could contain setup
   * statements that may fail on some database versions, and such failure is ignorable.
   *
   * @param index - index number of the file. The pre-upgrade script name is derived using the given
   *          index
   * @param scriptName - upgrade script name
   * @return name of the pre-upgrade script to be run before running upgrade script
   */
  String getPreUpgradeScriptName(int index, String scriptName);

  /**
   * Get hive distribution schema version. Schematool uses this version to identify
   * the Hive version. It compares this version with the version found in metastore database
   * to determine the upgrade or initialization scripts.
   * @return Hive schema version
   */
  String getHiveSchemaVersion();

  /**
   * Get the schema version from the backend database. This version is used by SchemaTool to
   * compare against the version returned by getHiveSchemaVersion and determine the upgrade order
   * and scripts needed to upgrade the metastore schema.
   *
   * @param metastoreDbConnectionInfo Connection information needed to connect to the backend
   *          database
   * @return the schema version recorded in the metastore database
   * @throws HiveMetaException when unable to fetch the schema version
   */
  String getMetaStoreSchemaVersion(
      HiveSchemaHelper.MetaStoreConnectionInfo metastoreDbConnectionInfo) throws HiveMetaException;

  /**
   * A dbVersion is compatible with hive version if it is greater or equal to the hive version. This
   * is result of the db schema upgrade design principles followed in hive project. The state where
   * db schema version is ahead of hive software version is often seen when a 'rolling upgrade' or
   * 'rolling downgrade' is happening. This is a state where hive is functional and returning non
   * zero status for it is misleading.
   *
   * @param productVersion version of hive software
   * @param dbVersion version of metastore rdbms schema
   * @return true if versions are compatible
   */
  boolean isVersionCompatible(String productVersion, String dbVersion);
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LinearExtrapolatePartStatus.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hive.metastore;

import java.math.BigDecimal;
import java.util.Map;

/**
 * Linear extrapolation of a column statistic across the full partition range,
 * from the two partitions holding the min and max observed values.
 *
 * NOTE(review): assumes min[0]/max[0] are the statistic values and min[1]/max[1]
 * are partition names present as keys in indexMap — confirm against callers.
 */
public class LinearExtrapolatePartStatus implements IExtrapolatePartStatus {

  @Override
  public Object extrapolate(Object[] min, Object[] max, int colStatIndex,
      Map<String, Integer> indexMap) {
    // Ordinal of the last partition; extrapolation targets the borders [0, rightBorderInd].
    int rightBorderInd = indexMap.size() - 1;
    int minInd = indexMap.get((String) min[1]);
    int maxInd = indexMap.get((String) max[1]);
    if (minInd == maxInd) {
      // Both extremes come from the same partition: no slope to extrapolate with.
      return min[0];
    }
    //note that recent metastore stores decimal in string.
    double decimalmin= 0;
    double decimalmax = 0;
    if (colStatTypes[colStatIndex] == ColStatType.Decimal) {
      BigDecimal bdmin = new BigDecimal(min[0].toString());
      decimalmin = bdmin.doubleValue();
      BigDecimal bdmax = new BigDecimal(max[0].toString());
      decimalmax = bdmax.doubleValue();
    }
    // For each aggregation direction, project the min->max slope out to the
    // relevant border. NOTE: for Long stats the multiply-then-divide order is
    // deliberate integer arithmetic; do not reorder.
    if (aggrTypes[colStatIndex] == AggrType.Max) {
      if (minInd < maxInd) {
        // right border is the max
        if (colStatTypes[colStatIndex] == ColStatType.Long) {
          return (Long) ((Long) min[0] + (((Long) max[0] - (Long) min[0])
              * (rightBorderInd - minInd) / (maxInd - minInd)));
        } else if (colStatTypes[colStatIndex] == ColStatType.Double) {
          return (Double) ((Double) min[0] + (((Double) max[0] - (Double) min[0])
              * (rightBorderInd - minInd) / (maxInd - minInd)));
        } else {
          double ret = decimalmin + (decimalmax - decimalmin)
              * (rightBorderInd - minInd) / (maxInd - minInd);
          return String.valueOf(ret);
        }
      } else {
        // left border is the max
        if (colStatTypes[colStatIndex] == ColStatType.Long) {
          return (Long) ((Long) min[0] + ((Long) max[0] - (Long) min[0])
              * minInd / (minInd - maxInd));
        } else if (colStatTypes[colStatIndex] == ColStatType.Double) {
          return (Double) ((Double) min[0] + ((Double) max[0] - (Double) min[0])
              * minInd / (minInd - maxInd));
        } else {
          double ret = decimalmin + (decimalmax - decimalmin) * minInd
              / (minInd - maxInd);
          return String.valueOf(ret);
        }
      }
    } else {
      if (minInd < maxInd) {
        // left border is the min
        if (colStatTypes[colStatIndex] == ColStatType.Long) {
          Long ret = (Long) max[0] - ((Long) max[0] - (Long) min[0]) * maxInd
              / (maxInd - minInd);
          return ret;
        } else if (colStatTypes[colStatIndex] == ColStatType.Double) {
          Double ret = (Double) max[0] - ((Double) max[0] - (Double) min[0])
              * maxInd / (maxInd - minInd);
          return ret;
        } else {
          double ret = decimalmax - (decimalmax - decimalmin) * maxInd
              / (maxInd - minInd);
          return String.valueOf(ret);
        }
      } else {
        // right border is the min
        if (colStatTypes[colStatIndex] == ColStatType.Long) {
          Long ret = (Long) max[0] - ((Long) max[0] - (Long) min[0])
              * (rightBorderInd - maxInd) / (minInd - maxInd);
          return ret;
        } else if (colStatTypes[colStatIndex] == ColStatType.Double) {
          Double ret = (Double) max[0] - ((Double) max[0] - (Double) min[0])
              * (rightBorderInd - maxInd) / (minInd - maxInd);
          return ret;
        } else {
          double ret = decimalmax - (decimalmax - decimalmin)
              * (rightBorderInd - maxInd) / (minInd - maxInd);
          return String.valueOf(ret);
        }
      }
    }
  }
}
http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockComponentBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.api.DataOperationType; +import org.apache.hadoop.hive.metastore.api.LockComponent; +import org.apache.hadoop.hive.metastore.api.LockLevel; +import org.apache.hadoop.hive.metastore.api.LockType; + +/** + * A builder for {@link LockComponent}s + */ +public class LockComponentBuilder { + private LockComponent component; + private boolean tableNameSet; + private boolean partNameSet; + + public LockComponentBuilder() { + component = new LockComponent(); + tableNameSet = partNameSet = false; + } + + /** + * Set the lock to be exclusive. + * @return reference to this builder + */ + public LockComponentBuilder setExclusive() { + component.setType(LockType.EXCLUSIVE); + return this; + } + + /** + * Set the lock to be semi-shared. + * @return reference to this builder + */ + public LockComponentBuilder setSemiShared() { + component.setType(LockType.SHARED_WRITE); + return this; + } + + /** + * Set the lock to be shared. + * @return reference to this builder + */ + public LockComponentBuilder setShared() { + component.setType(LockType.SHARED_READ); + return this; + } + + /** + * Set the database name. 
+ * @param dbName database name + * @return reference to this builder + */ + public LockComponentBuilder setDbName(String dbName) { + component.setDbname(dbName); + return this; + } + + public LockComponentBuilder setOperationType(DataOperationType dop) { + component.setOperationType(dop); + return this; + } + + public LockComponentBuilder setIsAcid(boolean t) { + component.setIsAcid(t); + return this; + } + /** + * Set the table name. + * @param tableName table name + * @return reference to this builder + */ + public LockComponentBuilder setTableName(String tableName) { + component.setTablename(tableName); + tableNameSet = true; + return this; + } + + /** + * Set the partition name. + * @param partitionName partition name + * @return reference to this builder + */ + public LockComponentBuilder setPartitionName(String partitionName) { + component.setPartitionname(partitionName); + partNameSet = true; + return this; + } + public LockComponentBuilder setIsDynamicPartitionWrite(boolean t) { + component.setIsDynamicPartitionWrite(t); + return this; + } + + /** + * Get the constructed lock component. + * @return lock component. 
+ */ + public LockComponent build() { + LockLevel level = LockLevel.DB; + if (tableNameSet) level = LockLevel.TABLE; + if (partNameSet) level = LockLevel.PARTITION; + component.setLevel(level); + return component; + } +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java new file mode 100644 index 0000000..d03c73a --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/LockRequestBuilder.java @@ -0,0 +1,168 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.api.LockComponent; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockType; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.LinkedHashMap; +import java.util.Map; + +/** + * Builder class to make constructing {@link LockRequest} easier. + */ +public class LockRequestBuilder { + + private LockRequest req; + private LockTrie trie; + private boolean userSet; + + /** + * @deprecated + */ + public LockRequestBuilder() { + this(null); + } + public LockRequestBuilder(String agentInfo) { + req = new LockRequest(); + trie = new LockTrie(); + userSet = false; + if(agentInfo != null) { + req.setAgentInfo(agentInfo); + } + } + + /** + * Get the constructed LockRequest. + * @return lock request + */ + public LockRequest build() { + if (!userSet) { + throw new RuntimeException("Cannot build a lock without giving a user"); + } + trie.addLocksToRequest(req); + try { + req.setHostname(InetAddress.getLocalHost().getHostName()); + } catch (UnknownHostException e) { + throw new RuntimeException("Unable to determine our local host!"); + } + return req; + } + + /** + * Set the transaction id. 
+ * @param txnid transaction id
+ * @return reference to this builder
+ */
+ public LockRequestBuilder setTransactionId(long txnid) {
+ req.setTxnid(txnid);
+ return this;
+ }
+
+ public LockRequestBuilder setUser(String user) {
+ if (user == null) user = "unknown";
+ req.setUser(user);
+ userSet = true;
+ return this;
+ }
+
+ /**
+ * Add a lock component to the lock request
+ * @param component to add
+ * @return reference to this builder
+ */
+ public LockRequestBuilder addLockComponent(LockComponent component) {
+ trie.add(component);
+ return this;
+ }
+
+ // For reasons that are completely incomprehensible to me the semantic
+ // analyzers often ask for multiple locks on the same entity (for example
+ // a shared_read and an exclusive lock). The db locking system gets confused
+ // by this and deadlocks on it. To resolve that, we'll make sure in the
+ // request that multiple locks are coalesced and promoted to the higher
+ // level of locking. To do this we put all lock components in a trie based
+ // on dbname, tablename, partition name and handle the promotion as new
+ // requests come in. This structure depends on the fact that null is a
+ // valid key in a LinkedHashMap. So a database lock will map to (dbname, null,
+ // null). 
+ private static class LockTrie { + Map<String, TableTrie> trie; + + LockTrie() { + trie = new LinkedHashMap<>(); + } + + public void add(LockComponent comp) { + TableTrie tabs = trie.get(comp.getDbname()); + if (tabs == null) { + tabs = new TableTrie(); + trie.put(comp.getDbname(), tabs); + } + setTable(comp, tabs); + } + + public void addLocksToRequest(LockRequest request) { + for (TableTrie tab : trie.values()) { + for (PartTrie part : tab.values()) { + for (LockComponent lock : part.values()) { + request.addToComponent(lock); + } + } + } + } + + private void setTable(LockComponent comp, TableTrie tabs) { + PartTrie parts = tabs.get(comp.getTablename()); + if (parts == null) { + parts = new PartTrie(); + tabs.put(comp.getTablename(), parts); + } + setPart(comp, parts); + } + + private void setPart(LockComponent comp, PartTrie parts) { + LockComponent existing = parts.get(comp.getPartitionname()); + if (existing == null) { + // No existing lock for this partition. + parts.put(comp.getPartitionname(), comp); + } else if (existing.getType() != LockType.EXCLUSIVE && + (comp.getType() == LockType.EXCLUSIVE || + comp.getType() == LockType.SHARED_WRITE)) { + // We only need to promote if comp.type is > existing.type. For + // efficiency we check if existing is exclusive (in which case we + // need never promote) or if comp is exclusive or shared_write (in + // which case we can promote even though they may both be shared + // write). If comp is shared_read there's never a need to promote. 
+ parts.put(comp.getPartitionname(), comp); + } + } + + private static class TableTrie extends LinkedHashMap<String, PartTrie> { + } + + private static class PartTrie extends LinkedHashMap<String, LockComponent> { + } + + + + } +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionContext.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionContext.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionContext.java new file mode 100644 index 0000000..998531f --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionContext.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +/** + * Base class which provides context to implementations of MetaStoreEndFunctionListener + */ + +public class MetaStoreEndFunctionContext { + + /** + * whether method was successful or not. 
+ */ + private final boolean success; + private final Exception e; + private final String inputTableName; + + public MetaStoreEndFunctionContext(boolean success, Exception e, String inputTableName) { + this.success = success; + this.e = e; + this.inputTableName = inputTableName; + } + + public MetaStoreEndFunctionContext(boolean success) { + this(success, null, null); + } + + /** + * @return whether or not the method succeeded. + */ + public boolean isSuccess() { + return success; + } + + public Exception getException() { + return e; + } + + public String getInputTableName() { + return inputTableName; + } + +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java new file mode 100644 index 0000000..bc7e0be --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreEndFunctionListener.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.util.AbstractMap; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; + +/** + * This abstract class needs to be extended to provide implementation of actions that need + * to be performed when a function ends. These methods are called whenever a function ends. + * + * It also provides a way to add fb303 counters through the exportCounters method. + */ + +public abstract class MetaStoreEndFunctionListener implements Configurable { + + private Configuration conf; + + public MetaStoreEndFunctionListener(Configuration config){ + this.conf = config; + } + + public abstract void onEndFunction(String functionName, MetaStoreEndFunctionContext context); + + // Unless this is overridden, it does nothing + public void exportCounters(AbstractMap<String, Long> counters) { + } + + @Override + public Configuration getConf() { + return this.conf; + } + + @Override + public void setConf(Configuration config) { + this.conf = config; + } + + +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java new file mode 100644 index 0000000..ddcda4c --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFS.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+
+/**
+ * Define a set of APIs that may vary in different environments
+ */
+public interface MetaStoreFS {
+
+ /**
+ * Delete a directory.
+ *
+ * @param fs filesystem the directory lives on
+ * @param f path of the directory to delete
+ * @param recursive whether to delete the directory contents recursively
+ * @param ifPurge whether the data should be purged (implementation-defined;
+ * presumably bypasses any trash facility -- confirm against implementations)
+ * @param conf configuration to use for the operation
+ * @return true on success
+ * @throws MetaException if the directory cannot be deleted
+ */
+ public boolean deleteDir(FileSystem fs, Path f, boolean recursive,
+ boolean ifPurge, Configuration conf) throws MetaException;
+
+} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java new file mode 100644 index 0000000..fb341b2 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreFilterHook.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionSpec; +import org.apache.hadoop.hive.metastore.api.Table; + +/** + * Metadata filter hook for metastore client. This will be useful for authorization + * plugins on hiveserver2 to filter metadata results, especially in case of + * non-impersonation mode where the metastore doesn't know the end user's identity. 
+ */ +@InterfaceAudience.LimitedPrivate(value = {"Apache Sentry (Incubating)" }) +@InterfaceStability.Evolving +public interface MetaStoreFilterHook { + + /** + * Filter given list of databases + * @param dbList + * @return List of filtered Db names + */ + public List<String> filterDatabases(List<String> dbList) throws MetaException; + + /** + * filter to given database object if applicable + * @param dataBase + * @return the same database if it's not filtered out + * @throws NoSuchObjectException + */ + public Database filterDatabase(Database dataBase) throws MetaException, NoSuchObjectException; + + /** + * Filter given list of tables + * @param dbName + * @param tableList + * @return List of filtered table names + */ + public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException; + + /** + * filter to given table object if applicable + * @param table + * @return the same table if it's not filtered out + * @throws NoSuchObjectException + */ + public Table filterTable(Table table) throws MetaException, NoSuchObjectException; + + /** + * Filter given list of tables + * @param tableList + * @return List of filtered table names + */ + public List<Table> filterTables(List<Table> tableList) throws MetaException; + + /** + * Filter given list of partitions + * @param partitionList + * @return + */ + public List<Partition> filterPartitions(List<Partition> partitionList) throws MetaException; + + /** + * Filter given list of partition specs + * @param partitionSpecList + * @return + */ + public List<PartitionSpec> filterPartitionSpecs(List<PartitionSpec> partitionSpecList) + throws MetaException; + + /** + * filter to given partition object if applicable + * @param partition + * @return the same partition object if it's not filtered out + * @throws NoSuchObjectException + */ + public Partition filterPartition(Partition partition) throws MetaException, NoSuchObjectException; + + /** + * Filter given list of partition names + * @param 
dbName + * @param tblName + * @param partitionNames + * @return + */ + public List<String> filterPartitionNames(String dbName, String tblName, + List<String> partitionNames) throws MetaException; + + public Index filterIndex(Index index) throws MetaException, NoSuchObjectException; + + /** + * Filter given list of index names + * @param dbName + * @param tblName + * @param indexList + * @return + */ + public List<String> filterIndexNames(String dbName, String tblName, + List<String> indexList) throws MetaException; + + /** + * Filter given list of index objects + * @param indexeList + * @return + */ + public List<Index> filterIndexes(List<Index> indexeList) throws MetaException; +} + http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java new file mode 100644 index 0000000..d3eee85 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreInit.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook; +import org.apache.hadoop.util.ReflectionUtils; + +/** + * MetaStoreInit defines functions to init/update MetaStore connection url. + * + */ +public class MetaStoreInit { + + private static final Logger LOG = LoggerFactory.getLogger(MetaStoreInit.class); + + static class MetaStoreInitData { + JDOConnectionURLHook urlHook = null; + String urlHookClassName = ""; + } + + /** + * Updates the connection URL in hiveConf using the hook (if a hook has been + * set using hive.metastore.ds.connection.url.hook property) + * @param originalConf - original configuration used to look up hook settings + * @param activeConf - the configuration file in use for looking up db url + * @param badUrl + * @param updateData - hook information + * @return true if a new connection URL was loaded into the thread local + * configuration + * @throws MetaException + */ + static boolean updateConnectionURL(Configuration originalConf, Configuration activeConf, + String badUrl, MetaStoreInitData updateData) + throws MetaException { + String connectUrl = null; + String currentUrl = MetaStoreInit.getConnectionURL(activeConf); + try { + // We always call init because the hook name in the configuration could + // have changed. 
+ MetaStoreInit.initConnectionUrlHook(originalConf, updateData);
+ if (updateData.urlHook != null) {
+ if (badUrl != null) {
+ updateData.urlHook.notifyBadConnectionUrl(badUrl);
+ }
+ connectUrl = updateData.urlHook.getJdoConnectionUrl(originalConf);
+ }
+ } catch (Exception e) {
+ // Pass the Throwable as the second SLF4J argument so the full stack trace
+ // is logged; string-concatenating the exception loses the trace.
+ LOG.error("Exception while getting connection URL from the hook: ", e);
+ }
+
+ if (connectUrl != null && !connectUrl.equals(currentUrl)) {
+ LOG.error(
+ String.format("Overriding %s with %s",
+ MetastoreConf.ConfVars.CONNECTURLKEY.toString(),
+ connectUrl));
+ MetastoreConf.setVar(activeConf, ConfVars.CONNECTURLKEY, connectUrl);
+ return true;
+ }
+ return false;
+ }
+
+ /** @return the configured JDO connection URL, or the empty string if unset. */
+ static String getConnectionURL(Configuration conf) {
+ return MetastoreConf.getVar(conf, ConfVars.CONNECTURLKEY, "");
+ }
+
+ // Multiple threads could try to initialize at the same time.
+ synchronized private static void initConnectionUrlHook(Configuration conf,
+ MetaStoreInitData updateData) throws ClassNotFoundException {
+
+ String className = MetastoreConf.getVar(conf, ConfVars.CONNECTURLHOOK, "").trim();
+ if (className.equals("")) {
+ updateData.urlHookClassName = "";
+ updateData.urlHook = null;
+ return;
+ }
+ boolean urlHookChanged = !updateData.urlHookClassName.equals(className);
+ if (updateData.urlHook == null || urlHookChanged) {
+ // className was already trimmed above; no need to trim again.
+ updateData.urlHookClassName = className;
+
+ Class<?> urlHookClass = Class.forName(updateData.urlHookClassName, true,
+ JavaUtils.getClassLoader());
+ updateData.urlHook = (JDOConnectionURLHook) ReflectionUtils.newInstance(urlHookClass, null);
+ }
+ }
+} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java 
b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java new file mode 100644 index 0000000..0c36855 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfo.java @@ -0,0 +1,234 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper; +import org.apache.hadoop.hive.metastore.tools.HiveSchemaHelper.MetaStoreConnectionInfo; + +import com.google.common.collect.ImmutableMap; +import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo; + + +public class MetaStoreSchemaInfo implements IMetaStoreSchemaInfo { + protected static final String UPGRADE_FILE_PREFIX = "upgrade-"; + private static final String INIT_FILE_PREFIX = "hive-schema-"; + private static final String VERSION_UPGRADE_LIST = "upgrade.order"; + private static final String PRE_UPGRADE_PREFIX = "pre-"; + protected final String dbType; + private String[] hiveSchemaVersions; + private final String hiveHome; + + // Some version upgrades often don't change schema. So they are equivalent to + // a version + // that has a corresponding schema. eg "0.13.1" is equivalent to "0.13.0" + private static final Map<String, String> EQUIVALENT_VERSIONS = + ImmutableMap.of("0.13.1", "0.13.0", + "1.0.0", "0.14.0", + "1.0.1", "1.0.0", + "1.1.1", "1.1.0", + "1.2.1", "1.2.0" + ); + + public MetaStoreSchemaInfo(String hiveHome, String dbType) throws HiveMetaException { + this.hiveHome = hiveHome; + this.dbType = dbType; + } + + private void loadAllUpgradeScripts(String dbType) throws HiveMetaException { + // load upgrade order for the given dbType + List<String> upgradeOrderList = new ArrayList<>(); + String upgradeListFile = getMetaStoreScriptDir() + File.separator + + VERSION_UPGRADE_LIST + "." 
+ dbType; + try (FileReader fr = new FileReader(upgradeListFile); + BufferedReader bfReader = new BufferedReader(fr)) { + String currSchemaVersion; + while ((currSchemaVersion = bfReader.readLine()) != null) { + upgradeOrderList.add(currSchemaVersion.trim()); + } + } catch (FileNotFoundException e) { + throw new HiveMetaException("File " + upgradeListFile + "not found ", e); + } catch (IOException e) { + throw new HiveMetaException("Error reading " + upgradeListFile, e); + } + hiveSchemaVersions = upgradeOrderList.toArray(new String[0]); + } + + /*** + * Get the list of sql scripts required to upgrade from the give version to current + * @param fromVersion + * @return + * @throws HiveMetaException + */ + @Override + public List<String> getUpgradeScripts(String fromVersion) + throws HiveMetaException { + List <String> upgradeScriptList = new ArrayList<>(); + + // check if we are already at current schema level + if (getHiveSchemaVersion().equals(fromVersion)) { + return upgradeScriptList; + } + loadAllUpgradeScripts(dbType); + // Find the list of scripts to execute for this upgrade + int firstScript = hiveSchemaVersions.length; + for (int i=0; i < hiveSchemaVersions.length; i++) { + if (hiveSchemaVersions[i].startsWith(fromVersion)) { + firstScript = i; + } + } + if (firstScript == hiveSchemaVersions.length) { + throw new HiveMetaException("Unknown version specified for upgrade " + + fromVersion + " Metastore schema may be too old or newer"); + } + + for (int i=firstScript; i < hiveSchemaVersions.length; i++) { + String scriptFile = generateUpgradeFileName(hiveSchemaVersions[i]); + upgradeScriptList.add(scriptFile); + } + return upgradeScriptList; + } + + /*** + * Get the name of the script to initialize the schema for given version + * @param toVersion Target version. 
If it's null, then the current server version is used + * @return + * @throws HiveMetaException + */ + @Override + public String generateInitFileName(String toVersion) throws HiveMetaException { + if (toVersion == null) { + toVersion = getHiveSchemaVersion(); + } + String initScriptName = INIT_FILE_PREFIX + toVersion + "." + + dbType + SQL_FILE_EXTENSION; + // check if the file exists + if (!(new File(getMetaStoreScriptDir() + File.separatorChar + + initScriptName).exists())) { + throw new HiveMetaException("Unknown version specified for initialization: " + toVersion); + } + return initScriptName; + } + + /** + * Find the directory of metastore scripts + * @return + */ + @Override + public String getMetaStoreScriptDir() { + return hiveHome + File.separatorChar + + "scripts" + File.separatorChar + "metastore" + + File.separatorChar + "upgrade" + File.separatorChar + dbType; + } + + // format the upgrade script name eg upgrade-x-y-dbType.sql + private String generateUpgradeFileName(String fileVersion) { + return UPGRADE_FILE_PREFIX + fileVersion + "." 
+ dbType + SQL_FILE_EXTENSION; + } + + @Override + public String getPreUpgradeScriptName(int index, String upgradeScriptName) { + return PRE_UPGRADE_PREFIX + index + "-" + upgradeScriptName; + } + + @Override + public String getHiveSchemaVersion() { + String hiveVersion = MetastoreVersionInfo.getShortVersion(); + return getEquivalentVersion(hiveVersion); + } + + private static String getEquivalentVersion(String hiveVersion) { + // if there is an equivalent version, return that, else return this version + String equivalentVersion = EQUIVALENT_VERSIONS.get(hiveVersion); + if (equivalentVersion != null) { + return equivalentVersion; + } else { + return hiveVersion; + } + } + + @Override + public boolean isVersionCompatible(String hiveVersion, String dbVersion) { + hiveVersion = getEquivalentVersion(hiveVersion); + dbVersion = getEquivalentVersion(dbVersion); + if (hiveVersion.equals(dbVersion)) { + return true; + } + String[] hiveVerParts = hiveVersion.split("\\."); + String[] dbVerParts = dbVersion.split("\\."); + if (hiveVerParts.length != 3 || dbVerParts.length != 3) { + // these are non standard version numbers. 
can't perform the + // comparison on these, so assume that they are incompatible + return false; + } + + for (int i = 0; i < dbVerParts.length; i++) { + int dbVerPart = Integer.parseInt(dbVerParts[i]); + int hiveVerPart = Integer.parseInt(hiveVerParts[i]); + if (dbVerPart > hiveVerPart) { + return true; + } else if (dbVerPart < hiveVerPart) { + return false; + } else { + continue; // compare next part + } + } + + return true; + } + + @Override + public String getMetaStoreSchemaVersion(MetaStoreConnectionInfo connectionInfo) + throws HiveMetaException { + String versionQuery; + boolean needsQuotedIdentifier = + HiveSchemaHelper.getDbCommandParser(connectionInfo.getDbType()).needsQuotedIdentifier(); + if (needsQuotedIdentifier) { + versionQuery = "select t.\"SCHEMA_VERSION\" from \"VERSION\" t"; + } else { + versionQuery = "select t.SCHEMA_VERSION from VERSION t"; + } + try (Connection metastoreDbConnection = + HiveSchemaHelper.getConnectionToMetastore(connectionInfo); Statement stmt = + metastoreDbConnection.createStatement()) { + ResultSet res = stmt.executeQuery(versionQuery); + if (!res.next()) { + throw new HiveMetaException("Could not find version info in metastore VERSION table."); + } + String currentSchemaVersion = res.getString(1); + if (res.next()) { + throw new HiveMetaException("Multiple versions were found in metastore."); + } + return currentSchemaVersion; + } catch (SQLException e) { + throw new HiveMetaException("Failed to get schema version, Cause:" + e.getMessage()); + } + } +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java new file mode 100644 index 
0000000..d03b7ae --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreSchemaInfoFactory.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Factory class implementation to create instances of IMetaStoreSchemaInfo + * based on the provided configuration + */ +public class MetaStoreSchemaInfoFactory { + public static final Logger LOG = LoggerFactory.getLogger(MetaStoreSchemaInfoFactory.class); + + public static IMetaStoreSchemaInfo get(Configuration conf) { + String hiveHome = System.getenv("HIVE_HOME"); + if (hiveHome == null) { + LOG.debug("HIVE_HOME is not set. 
Using current directory instead"); + hiveHome = "."; + } + return get(conf, hiveHome, null); + } + + public static IMetaStoreSchemaInfo get(Configuration conf, String hiveHome, String dbType) { + String className = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.SCHEMA_INFO_CLASS); + Class<?> clasz; + try { + clasz = conf.getClassByName(className); + } catch (ClassNotFoundException e) { + LOG.error("Unable to load class " + className, e); + throw new IllegalArgumentException(e); + } + Constructor<?> constructor; + try { + constructor = clasz.getConstructor(String.class, String.class); + constructor.setAccessible(true); + return (IMetaStoreSchemaInfo) constructor.newInstance(hiveHome, dbType); + } catch (NoSuchMethodException | InstantiationException | IllegalAccessException + | IllegalArgumentException | InvocationTargetException e) { + LOG.error("Unable to create instance of class " + className, e); + throw new IllegalArgumentException(e); + } + } +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java new file mode 100644 index 0000000..10f7732 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreThread.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.MetaException; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * A thread that runs in the metastore, separate from the threads in the thrift service. + */ +public interface MetaStoreThread extends Configurable { + + /** + * Set the id for this thread. + * @param threadId id of the thread + */ + void setThreadId(int threadId); + + /** + * Initialize the thread. This must not be called until after + * {@link #setConf(Configuration)} and {@link #setThreadId(int)} + * have been called. + * @param stop a flag to watch for when to stop. If this value is set to true, + * the thread will terminate the next time through its main loop. + * @param looped a flag that is set to true everytime a thread goes through it's main loop. + * This is purely for testing so that tests can assure themselves that the thread + * has run through it's loop once. The test can set this value to false. The + * thread should then assure that the loop has been gone completely through at + * least once. + */ + void init(AtomicBoolean stop, AtomicBoolean looped) throws MetaException; + + /** + * Run the thread in the background. This must not be called until + * {@link MetaStoreThread#init(java.util.concurrent.atomic.AtomicBoolean,java.util.concurrent.atomic.AtomicBoolean)} has + * been called. 
+ */ + void start(); +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java new file mode 100644 index 0000000..40018c9 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/PartitionDropOptions.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +/** + * Class to generalize the switches for dropPartitions(). 
/**
 * Class to generalize the switches for dropPartitions().
 *
 * Options are set through the fluent methods, which return {@code this} so
 * calls can be chained; the fields themselves are public and read directly
 * by callers, so they must remain public for compatibility.
 */
public class PartitionDropOptions {

  /** Whether to delete the partition's data; defaults to true. */
  public boolean deleteData = true;
  /** Whether to tolerate a partition that does not exist; defaults to false. */
  public boolean ifExists = false;
  /** Whether results should be returned to the caller; defaults to true. */
  public boolean returnResults = true;
  /** Whether data should be purged when dropped; defaults to false. */
  public boolean purgeData = false;

  /** @return a new options instance with default settings. */
  public static PartitionDropOptions instance() { return new PartitionDropOptions(); }

  /**
   * Sets the delete-data switch.
   * @param deleteData whether the partition's data should be deleted
   * @return this, for chaining
   */
  public PartitionDropOptions deleteData(boolean deleteData) {
    this.deleteData = deleteData;
    return this;
  }

  /**
   * Sets the if-exists switch.
   * @param ifExists whether a missing partition should be tolerated
   * @return this, for chaining
   */
  public PartitionDropOptions ifExists(boolean ifExists) {
    this.ifExists = ifExists;
    return this;
  }

  /**
   * Sets the return-results switch.
   * @param returnResults whether results should be returned to the caller
   * @return this, for chaining
   */
  public PartitionDropOptions returnResults(boolean returnResults) {
    this.returnResults = returnResults;
    return this;
  }

  /**
   * Sets the purge-data switch.
   * @param purgeData whether data should be purged when dropped
   * @return this, for chaining
   */
  public PartitionDropOptions purgeData(boolean purgeData) {
    this.purgeData = purgeData;
    return this;
  }

} // class PartitionDropOptions
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; + +/** + * The proxy interface that metastore uses for variety of QL operations (metastore can't depend + * on QL because QL depends on metastore; creating metastore-client module would be a proper way + * to solve this problem). + */ +public interface PartitionExpressionProxy { + + /** + * Converts serialized Hive expression into filter in the format suitable for Filter.g. + * @param expr Serialized expression. + * @return The filter string. + */ + public String convertExprToFilter(byte[] expr) throws MetaException; + + /** + * Filters the partition names via serialized Hive expression. + * @param partColumns Partition columns in the underlying table. + * @param expr Serialized expression. + * @param defaultPartitionName Default partition name from job or server configuration. + * @param partitionNames Partition names; the list is modified in place. + * @return Whether there were any unknown partitions preserved in the name list. + */ + boolean filterPartitionsByExpr(List<FieldSchema> partColumns, + byte[] expr, String defaultPartitionName, List<String> partitionNames) throws MetaException; + + /** + * Determines the file metadata type from input format of the source table or partition. + * @param inputFormat Input format name. 
+ * @return The file metadata type. + */ + FileMetadataExprType getMetadataType(String inputFormat); + + /** + * Gets a separate proxy that can be used to call file-format-specific methods. + * @param type The file metadata type. + * @return The proxy. + */ + FileFormatProxy getFileFormatProxy(FileMetadataExprType type); + + /** + * Creates SARG from serialized representation. + * @param expr SARG, serialized as Kryo. + * @return SARG. + */ + SearchArgument createSarg(byte[] expr); +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java new file mode 100644 index 0000000..95bd76e --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TServerSocketKeepAlive.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import java.net.SocketException; + +import org.apache.thrift.transport.TServerSocket; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransportException; + +/** + * TServerSocketKeepAlive - like TServerSocket, but will enable keepalive for + * accepted sockets. + * + */ +public class TServerSocketKeepAlive extends TServerSocket { + public TServerSocketKeepAlive(TServerSocket serverSocket) throws TTransportException { + super(serverSocket.getServerSocket()); + } + + @Override + protected TSocket acceptImpl() throws TTransportException { + TSocket ts = super.acceptImpl(); + try { + ts.getSocket().setKeepAlive(true); + } catch (SocketException e) { + throw new TTransportException(e); + } + return ts; + } +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TableType.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TableType.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TableType.java new file mode 100644 index 0000000..e9e16d7 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/TableType.java @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
/**
 * Typesafe enum for the types of tables described by the metastore.
 */
public enum TableType {
  MANAGED_TABLE,
  EXTERNAL_TABLE,
  VIRTUAL_VIEW,
  INDEX_TABLE,
  MATERIALIZED_VIEW
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.annotation; + +import org.apache.hadoop.classification.InterfaceStability; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + + +/** + * HiveVersionAnnotation. + * + */ +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.PACKAGE) +@InterfaceStability.Unstable +public @interface MetastoreVersionAnnotation { + + /** + * Get the Hive version + * @return the version string "0.6.3-dev" + */ + String version(); + + /** + * Get the Hive short version containing major/minor/change version numbers + * @return the short version string "0.6.3" + */ + String shortVersion(); + + /** + * Get the username that compiled Hive. + */ + String user(); + + /** + * Get the date when Hive was compiled. + * @return the date in unix 'date' format + */ + String date(); + + /** + * Get the url for the git repository. + */ + String url(); + + /** + * Get the git revision. + * @return the revision number as a string (eg. "451451") + */ + String revision(); + + /** + * Get the branch from which this was compiled. + * @return The branch name, e.g. "trunk" or "branches/branch-0.20" + */ + String branch(); + + /** + * Get a checksum of the source files from which + * Hive was compiled. 
+ * @return a string that uniquely identifies the source + **/ + String srcChecksum(); + +} http://git-wip-us.apache.org/repos/asf/hive/blob/b0b6db73/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java ---------------------------------------------------------------------- diff --git a/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java new file mode 100644 index 0000000..edf0831 --- /dev/null +++ b/standalone-metastore/src/main/java/org/apache/hadoop/hive/metastore/annotation/NoReconnect.java @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
/**
 * Marker annotation for metastore client methods. Carries no members; it is
 * retained at runtime and targets methods only. NOTE(review): judging by the
 * name, methods carrying it are presumably excluded from automatic
 * reconnect/retry handling — confirm against the client's invocation handler.
 */
@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface NoReconnect {
}
+ */ + +package org.apache.hadoop.hive.metastore.hooks; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.conf.Configuration; + +/** + * JDOConnectURLHook is used to get the URL that JDO uses to connect to the + * database that stores the metastore data. Classes implementing this must be + * thread-safe (for Thrift server). + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public interface JDOConnectionURLHook { + + /** + * Gets the connection URL to supply to JDO. In addition to initialization, + * this method will be called after a connection failure for each reconnect + * attempt. + * + * @param conf The configuration used to initialize this instance of the HMS + * @return the connection URL + * @throws Exception + */ + public String getJdoConnectionUrl(Configuration conf) + throws Exception; + + /** + * Alerts this that the connection URL was bad. Can be used to collect stats, + * etc. + * + * @param url + */ + public void notifyBadConnectionUrl(String url); +}