diff --git a/bin/generate-sources.sh b/bin/generate-sources.sh
index 65d7359..db2576a 100755
--- a/bin/generate-sources.sh
+++ b/bin/generate-sources.sh
@@ -5,12 +5,43 @@ set -eu
cd "${BASH_SOURCE%/*}/.."
TARGET=/build/target/generated-sources/thrift
+GRADLE_VERSION=8.4
+THRIFT_VERSION=$(mvn -q help:evaluate -Dexpression=libthrift.version -DforceStdout)
-# use unstable for Thrift 0.19.0
-docker run -v "${PWD}:/build" --rm debian:unstable /bin/sh -c "\
+docker run -v "${PWD}:/build" --rm debian:bookworm /bin/sh -c "\
set -eux
apt-get update -q
-apt-get install -q -y thrift-compiler
+apt-get install -q -y \
+ ant \
+ automake \
+ bison \
+ flex \
+ g++ \
+ git \
+ libboost-all-dev \
+ libevent-dev \
+ libssl-dev \
+ libtool \
+ make \
+ openjdk-17-jdk-headless \
+ pkg-config \
+ unzip \
+ wget
+
+wget https://services.gradle.org/distributions/gradle-$GRADLE_VERSION-bin.zip -q \
+ -O /tmp/gradle-$GRADLE_VERSION-bin.zip
+unzip -d /tmp /tmp/gradle-$GRADLE_VERSION-bin.zip
+mv /tmp/gradle-$GRADLE_VERSION /usr/local/gradle
+ln -s /usr/local/gradle/bin/gradle /usr/local/bin
+
+wget https://dlcdn.apache.org/thrift/$THRIFT_VERSION/thrift-$THRIFT_VERSION.tar.gz
+tar zxf thrift-$THRIFT_VERSION.tar.gz
+cd thrift-$THRIFT_VERSION
+./bootstrap.sh
+./configure --without-cpp --without-kotlin --without-python --without-py3
+make
+make install
+
rm -rf $TARGET
mkdir -p $TARGET
thrift -o $TARGET \
diff --git a/pom.xml b/pom.xml
index d451a3b..4f4bd3c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -28,13 +28,14 @@
true
all,-missing
17
+ <libthrift.version>0.21.0</libthrift.version>
<groupId>org.apache.thrift</groupId>
<artifactId>libthrift</artifactId>
- <version>0.19.0</version>
+ <version>${libthrift.version}</version>
diff --git a/src/main/thrift/hive_metastore.thrift b/src/main/thrift/hive_metastore.thrift
index ca8d4c2..335fb9d 100644
--- a/src/main/thrift/hive_metastore.thrift
+++ b/src/main/thrift/hive_metastore.thrift
@@ -19,6 +19,10 @@
namespace java io.trino.hive.thrift.metastore
const string DDL_TIME = "transient_lastDdlTime"
+const byte ACCESSTYPE_NONE = 1;
+const byte ACCESSTYPE_READONLY = 2;
+const byte ACCESSTYPE_WRITEONLY = 4;
+const byte ACCESSTYPE_READWRITE = 8;
struct Version {
1: string version,
@@ -31,6 +35,14 @@ struct FieldSchema {
3: string comment
}
+// Key-value store to be used with selected
+// Metastore APIs (create, alter methods).
+// The client can pass environment properties / configs that can be
+// accessed in hooks.
+struct EnvironmentContext {
+ 1: map<string, string> properties
+}
+
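
As the comment above says, EnvironmentContext is a plain property bag that selected create/alter calls carry through to metastore hooks. As a rough illustration, a caller using the Java classes generated from this file might build one as below (a sketch; the property key is a Hive convention shown for illustration, not defined in this IDL):

    import io.trino.hive.thrift.metastore.EnvironmentContext;

    import java.util.Map;

    public final class EnvironmentContextExample
    {
        private EnvironmentContextExample() {}

        // Thrift generates an all-args constructor for the struct's single map field
        public static EnvironmentContext statsSkippingContext()
        {
            // "DO_NOT_UPDATE_STATS" is a property key read by Hive metastore hooks;
            // it is illustrative here and not part of this file
            return new EnvironmentContext(Map.of("DO_NOT_UPDATE_STATS", "true"));
        }
    }
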
struct SQLPrimaryKey {
1: string table_db, // table schema
2: string table_name, // table name
@@ -108,6 +120,15 @@ struct SQLCheckConstraint {
9: bool rely_cstr // Rely/No Rely
}
+struct SQLAllTableConstraints {
+ 1: optional list<SQLPrimaryKey> primaryKeys,
+ 2: optional list<SQLForeignKey> foreignKeys,
+ 3: optional list<SQLUniqueConstraint> uniqueConstraints,
+ 4: optional list<SQLNotNullConstraint> notNullConstraints,
+ 5: optional list<SQLDefaultConstraint> defaultConstraints,
+ 6: optional list<SQLCheckConstraint> checkConstraints
+}
+
struct Type {
1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types
2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
@@ -121,6 +142,7 @@ enum HiveObjectType {
TABLE = 3,
PARTITION = 4,
COLUMN = 5,
+ DATACONNECTOR = 6,
}
enum PrincipalType {
@@ -132,6 +154,24 @@ enum PrincipalType {
const string HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__"
const string HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__"
const string HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__"
+const string HIVE_FILTER_FIELD_TABLE_NAME = "hive_filter_field_tableName__"
+const string HIVE_FILTER_FIELD_TABLE_TYPE = "hive_filter_field_tableType__"
+
+struct PropertySetRequest {
+ 1: required string nameSpace;
+ 2: map<string, string> propertyMap;
+}
+
+struct PropertyGetRequest {
+ 1: required string nameSpace;
+ 2: string mapPrefix;
+ 3: optional string mapPredicate;
+ 4: optional list<string> mapSelection;
+}
+
+struct PropertyGetResponse {
+ 1: map<string, map<string, string>> properties;
+}
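
The three Property* structs above define a small namespaced key-value API consumed by the corresponding metastore service calls. A minimal sketch of a prefix query using the generated Java classes; the namespace and prefix values are invented for illustration:

    import io.trino.hive.thrift.metastore.PropertyGetRequest;

    public final class PropertyGetExample
    {
        private PropertyGetExample() {}

        public static PropertyGetRequest underPrefix()
        {
            PropertyGetRequest request = new PropertyGetRequest();
            request.setNameSpace("hms");         // hypothetical namespace
            request.setMapPrefix("compaction."); // hypothetical key prefix; only keys under it are returned
            return request;
        }
    }
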
enum PartitionEventType {
LOAD_DONE = 1,
@@ -161,11 +201,14 @@ enum LockType {
SHARED_READ = 1,
SHARED_WRITE = 2,
EXCLUSIVE = 3,
+ EXCL_WRITE = 4,
}
enum CompactionType {
MINOR = 1,
MAJOR = 2,
+ REBALANCE = 3,
+ ABORT_TXN_CLEANUP = 4,
}
enum GrantRevokeType {
@@ -223,6 +266,11 @@ enum SchemaVersionState {
DELETED = 8
}
+enum DatabaseType {
+ NATIVE = 1,
+ REMOTE = 2
+}
+
struct HiveObjectRef{
1: HiveObjectType objectType,
2: string dbName,
@@ -268,6 +316,18 @@ struct GrantRevokePrivilegeResponse {
1: optional bool success;
}
+struct TruncateTableRequest {
+ 1: required string dbName,
+ 2: required string tableName,
+ 3: optional list<string> partNames,
+ 4: optional i64 writeId=-1,
+ 5: optional string validWriteIdList,
+ 6: optional EnvironmentContext environmentContext
+}
+
+struct TruncateTableResponse {
+}
+
struct Role {
1: string roleName,
2: i32 createTime,
@@ -319,9 +379,10 @@ struct GrantRevokeRoleResponse {
struct Catalog {
1: string name, // Name of the catalog
2: optional string description, // description of the catalog
- 3: string locationUri // default storage location. When databases are created in
- // this catalog, if they do not specify a location, they will
- // be placed in this location.
+ 3: string locationUri, // default storage location. When databases are created in
+ // this catalog, if they do not specify a location, they will
+ // be placed in this location.
+ 4: optional i32 createTime // creation time of catalog in seconds since epoch
}
struct CreateCatalogRequest {
@@ -358,7 +419,12 @@ struct Database {
5: optional PrincipalPrivilegeSet privileges,
6: optional string ownerName,
7: optional PrincipalType ownerType,
- 8: optional string catalogName
+ 8: optional string catalogName,
+ 9: optional i32 createTime, // creation time of database in seconds since epoch
+ 10: optional string managedLocationUri, // directory for managed tables
+ 11: optional DatabaseType type,
+ 12: optional string connector_name,
+ 13: optional string remote_dbname
}
// This object holds the information needed by SerDes
@@ -401,67 +467,14 @@ struct StorageDescriptor {
12: optional bool storedAsSubDirectories // stored as subdirectories or not
}
-// table information
-struct Table {
- 1: string tableName, // name of the table
- 2: string dbName, // database name ('default')
- 3: string owner, // owner of this table
- 4: i32 createTime, // creation time of the table
- 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on)
- 6: i32 retention, // retention time
- 7: StorageDescriptor sd, // storage descriptor of the table
- 8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
- 9: map<string, string> parameters, // to store comments or any other user level parameters
- 10: string viewOriginalText, // original view text, null for non-view
- 11: string viewExpandedText, // expanded view text, null for non-view
- 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
- 13: optional PrincipalPrivilegeSet privileges,
- 14: optional bool temporary=false,
- 15: optional bool rewriteEnabled, // rewrite enabled or not
- 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
- 17: optional string catName, // Name of the catalog the table is in
- 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
- 19: optional i64 writeId=-1
-}
-
-struct Partition {
- 1: list<string> values // string value is converted to appropriate partition key type
- 2: string dbName,
- 3: string tableName,
- 4: i32 createTime,
- 5: i32 lastAccessTime,
- 6: StorageDescriptor sd,
- 7: map<string, string> parameters,
- 8: optional PrincipalPrivilegeSet privileges,
- 9: optional string catName,
- 10: optional i64 writeId=-1
-}
-
-struct PartitionWithoutSD {
- 1: list<string> values // string value is converted to appropriate partition key type
- 2: i32 createTime,
- 3: i32 lastAccessTime,
- 4: string relativePath,
- 5: map<string, string> parameters,
- 6: optional PrincipalPrivilegeSet privileges
-}
-
-struct PartitionSpecWithSharedSD {
- 1: list<PartitionWithoutSD> partitions,
- 2: StorageDescriptor sd,
-}
-
-struct PartitionListComposingSpec {
- 1: list<Partition> partitions
-}
-
-struct PartitionSpec {
- 1: string dbName,
- 2: string tableName,
- 3: string rootPath,
- 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
- 5: optional PartitionListComposingSpec partitionList,
- 6: optional string catName
+struct CreationMetadata {
+ 1: required string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: required set<string> tablesUsed,
+ 5: optional string validTxnList,
+ 6: optional i64 materializationTime,
+ 7: optional list<SourceTable> sourceTables
}
// column statistics
@@ -477,7 +490,8 @@ struct DoubleColumnStatsData {
2: optional double highValue,
3: required i64 numNulls,
4: required i64 numDVs,
-5: optional binary bitVectors
+5: optional binary bitVectors,
+6: optional binary histogram
}
struct LongColumnStatsData {
@@ -485,7 +499,8 @@ struct LongColumnStatsData {
2: optional i64 highValue,
3: required i64 numNulls,
4: required i64 numDVs,
-5: optional binary bitVectors
+5: optional binary bitVectors,
+6: optional binary histogram
}
struct StringColumnStatsData {
@@ -514,7 +529,8 @@ struct DecimalColumnStatsData {
2: optional Decimal highValue,
3: required i64 numNulls,
4: required i64 numDVs,
-5: optional binary bitVectors
+5: optional binary bitVectors,
+6: optional binary histogram
}
struct Date {
@@ -526,7 +542,21 @@ struct DateColumnStatsData {
2: optional Date highValue,
3: required i64 numNulls,
4: required i64 numDVs,
-5: optional binary bitVectors
+5: optional binary bitVectors,
+6: optional binary histogram
+}
+
+struct Timestamp {
+1: required i64 secondsSinceEpoch
+}
+
+struct TimestampColumnStatsData {
+1: optional Timestamp lowValue,
+2: optional Timestamp highValue,
+3: required i64 numNulls,
+4: required i64 numDVs,
+5: optional binary bitVectors,
+6: optional binary histogram
}
union ColumnStatisticsData {
@@ -536,7 +566,8 @@ union ColumnStatisticsData {
4: StringColumnStatsData stringStats,
5: BinaryColumnStatsData binaryStats,
6: DecimalColumnStatsData decimalStats,
-7: DateColumnStatsData dateStats
+7: DateColumnStatsData dateStats,
+8: TimestampColumnStatsData timestampStats
}
struct ColumnStatisticsObj {
@@ -556,32 +587,135 @@ struct ColumnStatisticsDesc {
struct ColumnStatistics {
1: required ColumnStatisticsDesc statsDesc,
-2: required list<ColumnStatisticsObj> statsObj;
+2: required list<ColumnStatisticsObj> statsObj,
+3: optional bool isStatsCompliant, // Are the stats isolation-level-compliant with the
+ // calling query?
+4: optional string engine = "hive"
+}
+
+// FileMetadata represents the table-level (for unpartitioned tables) or partition-level
+// file metadata. A partition can have more than one file, so the data field is a list of
+// binary values, one entry per file.
+struct FileMetadata {
+ // current supported type mappings are
+ // 1 -> IMPALA
+ 1: byte type = 1
+ 2: byte version = 1
+ 3: list<binary> data
+}
+
+// This field can be used to store repetitive information
+// (like network addresses in file metadata). Instead of
+// sending the same object repeatedly, we can send the indices
+// corresponding to the object in this list.
+struct ObjectDictionary {
+ // the key can be used to determine the object type
+ // the value is the list of the objects which can be accessed
+ // using their indices. These indices can be used to send instead of
+ // full object which can reduce the payload significantly in case of
+ // repetitive objects.
+ 1: required map<string, list<binary>> values
+}
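
In other words, a producer stores each distinct object once under a key in the dictionary and ships small integer indices in its place; a consumer reverses that mapping. A sketch of the lookup side with the generated Java types (Thrift renders map<string, list<binary>> as Map<String, List<ByteBuffer>>):

    import io.trino.hive.thrift.metastore.ObjectDictionary;

    import java.nio.ByteBuffer;
    import java.util.List;
    import java.util.Map;

    public final class ObjectDictionaryLookup
    {
        private ObjectDictionaryLookup() {}

        // Resolve an index, sent in place of the full object, back to the
        // serialized object stored under the given dictionary key
        public static ByteBuffer resolve(ObjectDictionary dictionary, String key, int index)
        {
            Map<String, List<ByteBuffer>> values = dictionary.getValues();
            return values.get(key).get(index);
        }
    }
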
-struct AggrStats {
-1: required list<ColumnStatisticsObj> colStats,
-2: required i64 partsFound // number of partitions for which stats were found
+// table information
+struct Table {
+ 1: string tableName, // name of the table
+ 2: string dbName, // database name ('default')
+ 3: string owner, // owner of this table
+ 4: i32 createTime, // creation time of the table
+ 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on)
+ 6: i32 retention, // retention time
+ 7: StorageDescriptor sd, // storage descriptor of the table
+ 8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
+ 9: map<string, string> parameters, // to store comments or any other user level parameters
+ 10: string viewOriginalText, // original view text, null for non-view
+ 11: string viewExpandedText, // expanded view text, null for non-view
+ 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
+ 13: optional PrincipalPrivilegeSet privileges,
+ 14: optional bool temporary=false,
+ 15: optional bool rewriteEnabled, // rewrite enabled or not
+ 16: optional CreationMetadata creationMetadata, // only for MVs, it stores table names used and txn list at MV creation
+ 17: optional string catName, // Name of the catalog the table is in
+ 18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
+ 19: optional i64 writeId=-1,
+ 20: optional bool isStatsCompliant,
+ 21: optional ColumnStatistics colStats, // column statistics for table
+ 22: optional byte accessType,
+ 23: optional list<string> requiredReadCapabilities,
+ 24: optional list<string> requiredWriteCapabilities
+ 25: optional i64 id, // id of the table. It will be ignored if set. It's only for
+ // read purposes
+ 26: optional FileMetadata fileMetadata, // optional serialized file-metadata for this table
+ // for certain execution engines
+ 27: optional ObjectDictionary dictionary,
+ 28: optional i64 txnId, // txnId associated with the table creation
+}
+
+struct SourceTable {
+ 1: required Table table,
+ 2: required i64 insertedCount,
+ 3: required i64 updatedCount,
+ 4: required i64 deletedCount
}
-struct AlterPartitionsRequest {
-1: optional string catName,
-2: required string dbName,
-3: required string tableName,
-4: required list<Partition> partitions,
-5: optional EnvironmentContext environmentContext,
-6: optional i64 writeId=-1,
-7: optional string validWriteIdList
+struct Partition {
+ 1: list<string> values // string value is converted to appropriate partition key type
+ 2: string dbName,
+ 3: string tableName,
+ 4: i32 createTime,
+ 5: i32 lastAccessTime,
+ 6: StorageDescriptor sd,
+ 7: map<string, string> parameters,
+ 8: optional PrincipalPrivilegeSet privileges,
+ 9: optional string catName,
+ 10: optional i64 writeId=-1,
+ 11: optional bool isStatsCompliant,
+ 12: optional ColumnStatistics colStats, // column statistics for partition
+ 13: optional FileMetadata fileMetadata // optional serialized file-metadata useful for certain execution engines
}
-struct AlterPartitionsResponse {
+struct PartitionWithoutSD {
+ 1: list<string> values // string value is converted to appropriate partition key type
+ 2: i32 createTime,
+ 3: i32 lastAccessTime,
+ 4: string relativePath,
+ 5: map<string, string> parameters,
+ 6: optional PrincipalPrivilegeSet privileges
+}
+
+struct PartitionSpecWithSharedSD {
+ 1: list<PartitionWithoutSD> partitions,
+ 2: StorageDescriptor sd,
+}
+
+struct PartitionListComposingSpec {
+ 1: list<Partition> partitions
+}
+
+struct PartitionSpec {
+ 1: string dbName,
+ 2: string tableName,
+ 3: string rootPath,
+ 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
+ 5: optional PartitionListComposingSpec partitionList,
+ 6: optional string catName,
+ 7: optional i64 writeId=-1,
+ 8: optional bool isStatsCompliant
+}
+
+struct AggrStats {
+1: required list<ColumnStatisticsObj> colStats,
+2: required i64 partsFound, // number of partitions for which stats were found
+3: optional bool isStatsCompliant
}
struct SetPartitionsStatsRequest {
1: required list<ColumnStatistics> colStats,
-2: optional bool needMerge //stats need to be merged with the existing stats
+2: optional bool needMerge, //stats need to be merged with the existing stats
3: optional i64 writeId=-1, // writeId for the current query that updates the stats
4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
+5: optional string engine = "hive" //engine creating the current request
}
struct SetPartitionsStatsResponse {
@@ -595,18 +729,12 @@ struct Schema {
2: map<string, string> properties
}
-// Key-value store to be used with selected
-// Metastore APIs (create, alter methods).
-// The client can pass environment properties / configs that can be
-// accessed in hooks.
-struct EnvironmentContext {
- 1: map properties
-}
-
struct PrimaryKeysRequest {
1: required string db_name,
2: required string tbl_name,
- 3: optional string catName
+ 3: optional string catName,
+ 4: optional string validWriteIdList,
+ 5: optional i64 tableId=-1
}
struct PrimaryKeysResponse {
@@ -617,8 +745,10 @@ struct ForeignKeysRequest {
1: string parent_db_name,
2: string parent_tbl_name,
3: string foreign_db_name,
- 4: string foreign_tbl_name
- 5: optional string catName // No cross catalog constraints
+ 4: string foreign_tbl_name,
+ 5: optional string catName, // No cross catalog constraints
+ 6: optional string validWriteIdList,
+ 7: optional i64 tableId=-1
}
struct ForeignKeysResponse {
@@ -629,6 +759,8 @@ struct UniqueConstraintsRequest {
1: required string catName,
2: required string db_name,
3: required string tbl_name,
+ 4: optional string validWriteIdList,
+ 5: optional i64 tableId=-1
}
struct UniqueConstraintsResponse {
@@ -639,6 +771,8 @@ struct NotNullConstraintsRequest {
1: required string catName,
2: required string db_name,
3: required string tbl_name,
+ 4: optional string validWriteIdList,
+ 5: optional i64 tableId=-1
}
struct NotNullConstraintsResponse {
@@ -648,7 +782,9 @@ struct NotNullConstraintsResponse {
struct DefaultConstraintsRequest {
1: required string catName,
2: required string db_name,
- 3: required string tbl_name
+ 3: required string tbl_name,
+ 4: optional string validWriteIdList,
+ 5: optional i64 tableId=-1
}
struct DefaultConstraintsResponse {
@@ -658,13 +794,26 @@ struct DefaultConstraintsResponse {
struct CheckConstraintsRequest {
1: required string catName,
2: required string db_name,
- 3: required string tbl_name
+ 3: required string tbl_name,
+ 4: optional string validWriteIdList,
+ 5: optional i64 tableId=-1
}
struct CheckConstraintsResponse {
1: required list<SQLCheckConstraint> checkConstraints
}
+struct AllTableConstraintsRequest {
+ 1: required string dbName,
+ 2: required string tblName,
+ 3: required string catName,
+ 4: optional string validWriteIdList,
+ 5: optional i64 tableId=-1
+}
+
+struct AllTableConstraintsResponse {
+ 1: required SQLAllTableConstraints allTableConstraints
+}
struct DropConstraintRequest {
1: required string dbname,
@@ -704,28 +853,46 @@ struct PartitionsByExprResult {
2: required bool hasUnknownPartitions
}
+// Return type for get_partitions_spec_by_expr
+struct PartitionsSpecByExprResult {
+1: required list<PartitionSpec> partitionsSpec,
+// Whether the results has any (currently, all) partitions which may or may not match
+2: required bool hasUnknownPartitions
+}
+
struct PartitionsByExprRequest {
1: required string dbName,
2: required string tblName,
3: required binary expr,
4: optional string defaultPartitionName,
- 5: optional i16 maxParts=-1
- 6: optional string catName
+ 5: optional i16 maxParts=-1,
+ 6: optional string catName,
+ 7: optional string order
+ 8: optional string validWriteIdList,
+ 9: optional i64 id=-1, // table id
+ 10: optional bool skipColumnSchemaForPartition,
+ 11: optional string includeParamKeyPattern,
+ 12: optional string excludeParamKeyPattern
}
struct TableStatsResult {
- 1: required list<ColumnStatisticsObj> tableStats
+ 1: required list<ColumnStatisticsObj> tableStats,
+ 2: optional bool isStatsCompliant
}
struct PartitionsStatsResult {
- 1: required map<string, list<ColumnStatisticsObj>> partStats
+ 1: required map<string, list<ColumnStatisticsObj>> partStats,
+ 2: optional bool isStatsCompliant
}
struct TableStatsRequest {
1: required string dbName,
2: required string tblName,
3: required list<string> colNames
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
+ 6: optional string engine = "hive", //engine creating the current request
+ 7: optional i64 id=-1 // table id
}
struct PartitionsStatsRequest {
@@ -733,12 +900,16 @@ struct PartitionsStatsRequest {
2: required string tblName,
3: required list<string> colNames,
4: required list<string> partNames,
- 5: optional string catName
+ 5: optional string catName,
+ 6: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
+ 7: optional string engine = "hive" //engine creating the current request
}
// Return type for add_partitions_req
struct AddPartitionsResult {
1: optional list<Partition> partitions,
+ 2: optional bool isStatsCompliant,
+ 3: optional list<FieldSchema> partitionColSchema
}
// Request type for add_partitions_req
@@ -748,7 +919,11 @@ struct AddPartitionsRequest {
3: required list<Partition> parts,
4: required bool ifNotExists,
5: optional bool needResult=true,
- 6: optional string catName
+ 6: optional string catName,
+ 7: optional string validWriteIdList,
+ 8: optional bool skipColumnSchemaForPartition,
+ 9: optional list<FieldSchema> partitionColSchema,
+ 10: optional EnvironmentContext environmentContext
}
// Return type for drop_partitions_req
@@ -777,7 +952,18 @@ struct DropPartitionsRequest {
6: optional bool ignoreProtection,
7: optional EnvironmentContext environmentContext,
8: optional bool needResult=true,
- 9: optional string catName
+ 9: optional string catName,
+ 10: optional bool skipColumnSchemaForPartition
+}
+
+struct DropPartitionRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: optional string partName,
+ 5: optional list<string> partVals,
+ 6: optional bool deleteData,
+ 7: optional EnvironmentContext environmentContext
}
struct PartitionValuesRequest {
@@ -789,7 +975,8 @@ struct PartitionValuesRequest {
6: optional list<FieldSchema> partitionOrder;
7: optional bool ascending = true;
8: optional i64 maxParts = -1;
- 9: optional string catName
+ 9: optional string catName,
+ 10: optional string validWriteIdList
}
struct PartitionValuesRow {
@@ -800,6 +987,38 @@ struct PartitionValuesResponse {
1: required list<PartitionValuesRow> partitionValues;
}
+struct GetPartitionsByNamesRequest {
+ 1: required string db_name,
+ 2: required string tbl_name,
+ 3: optional list<string> names,
+ 4: optional bool get_col_stats,
+ 5: optional list<string> processorCapabilities,
+ 6: optional string processorIdentifier,
+ 7: optional string engine = "hive",
+ 8: optional string validWriteIdList,
+ 9: optional bool getFileMetadata,
+ 10: optional i64 id=-1, // table id
+ 11: optional bool skipColumnSchemaForPartition,
+ 12: optional string includeParamKeyPattern,
+ 13: optional string excludeParamKeyPattern
+}
+
+struct GetPartitionsByNamesResult {
+ 1: required list<Partition> partitions
+ 2: optional ObjectDictionary dictionary
+}
+
+struct DataConnector {
+ 1: string name,
+ 2: string type,
+ 3: string url,
+ 4: optional string description,
+ 5: optional map<string, string> parameters,
+ 6: optional string ownerName,
+ 7: optional PrincipalType ownerType,
+ 8: optional i32 createTime
+}
+
enum FunctionType {
JAVA = 1,
}
@@ -810,6 +1029,23 @@ enum ResourceType {
ARCHIVE = 3,
}
+enum TxnType {
+ DEFAULT = 0,
+ REPL_CREATED = 1,
+ READ_ONLY = 2,
+ COMPACTION = 3,
+ MATER_VIEW_REBUILD = 4,
+ SOFT_DELETE = 5,
+ REBALANCE_COMPACTION = 6
+}
+
+// specifies which info to return with GetTablesExtRequest
+enum GetTablesExtRequestFields {
+ ACCESS_TYPE = 1, // return accessType
+ PROCESSOR_CAPABILITIES = 2, // return ALL Capabilities for each Tables
+ ALL = 2147483647
+}
+
struct ResourceUri {
1: ResourceType resourceType,
2: string uri,
@@ -860,6 +1096,7 @@ struct OpenTxnRequest {
4: optional string agentInfo = "Unknown",
5: optional string replPolicy,
6: optional list<i64> replSrcTxnIds,
+ 7: optional TxnType txn_type = TxnType.DEFAULT,
}
struct OpenTxnsResponse {
@@ -869,15 +1106,57 @@ struct OpenTxnsResponse {
struct AbortTxnRequest {
1: required i64 txnid,
2: optional string replPolicy,
+ 3: optional TxnType txn_type,
+ 4: optional i64 errorCode,
}
struct AbortTxnsRequest {
1: required list<i64> txn_ids,
+ 2: optional i64 errorCode,
+}
+
+struct CommitTxnKeyValue {
+ 1: required i64 tableId,
+ 2: required string key,
+ 3: required string value,
+}
+
+struct WriteEventInfo {
+ 1: required i64 writeId,
+ 2: required string database,
+ 3: required string table,
+ 4: required string files,
+ 5: optional string partition,
+ 6: optional string tableObj, // repl txn task does not need table object for commit
+ 7: optional string partitionObj,
+}
+
+struct ReplLastIdInfo {
+ 1: required string database,
+ 2: required i64 lastReplId,
+ 3: optional string table,
+ 4: optional string catalog,
+ 5: optional list<string> partitionList,
+}
+
+struct UpdateTransactionalStatsRequest {
+ 1: required i64 tableId,
+ 2: required i64 insertCount,
+ 3: required i64 updatedCount,
+ 4: required i64 deletedCount,
}
struct CommitTxnRequest {
1: required i64 txnid,
2: optional string replPolicy,
+ // Information related to write operations done in this transaction.
+ 3: optional list<WriteEventInfo> writeEventInfos,
+ // Information to update the last repl id of table/partition along with commit txn (replication from 2.6 to 3.0)
+ 4: optional ReplLastIdInfo replLastIdInfo,
+ // An optional key/value to store atomically with the transaction
+ 5: optional CommitTxnKeyValue keyValue,
+ 6: optional bool exclWriteEnabled = true,
+ 7: optional TxnType txn_type,
}
struct ReplTblWriteIdStateRequest {
@@ -892,7 +1171,8 @@ struct ReplTblWriteIdStateRequest {
// Request msg to get the valid write ids list for the given list of tables wrt to input validTxnList
struct GetValidWriteIdsRequest {
1: required list<string> fullTableNames, // Full table names of format <db_name>.<table_name>
- 2: required string validTxnList, // Valid txn list string wrt the current txn of the caller
+ 2: optional string validTxnList, // Valid txn list string wrt the current txn of the caller
+ 3: optional i64 writeId, //write id to be used to get the current txn id
}
// Valid Write ID list of one table wrt to current txn
@@ -909,6 +1189,12 @@ struct GetValidWriteIdsResponse {
1: required list<TableValidWriteIds> tblValidWriteIds,
}
+// Map for allocated write id against the txn for which it is allocated
+struct TxnToWriteId {
+ 1: required i64 txnId,
+ 2: required i64 writeId,
+}
+
// Request msg to allocate table write ids for the given list of txns
struct AllocateTableWriteIdsRequest {
1: required string dbName,
@@ -919,18 +1205,31 @@ struct AllocateTableWriteIdsRequest {
4: optional string replPolicy,
// The list is assumed to be sorted by both txnids and write ids. The write id list is assumed to be contiguous.
5: optional list<TxnToWriteId> srcTxnToWriteIdList,
-}
-
-// Map for allocated write id against the txn for which it is allocated
-struct TxnToWriteId {
- 1: required i64 txnId,
- 2: required i64 writeId,
+ // If false, reuse previously allocated writeIds for txnIds. If true, remove older txnId to writeId mappings
+ // and regenerate (this is useful during re-compilation when we need to ensure writeIds are regenerated)
+ 6: optional bool reallocate = false;
}
struct AllocateTableWriteIdsResponse {
1: required list<TxnToWriteId> txnToWriteIds,
}
+struct MaxAllocatedTableWriteIdRequest {
+ 1: required string dbName,
+ 2: required string tableName,
+}
+struct MaxAllocatedTableWriteIdResponse {
+ 1: required i64 maxWriteId,
+}
+struct SeedTableWriteIdsRequest {
+ 1: required string dbName,
+ 2: required string tableName,
+ 3: required i64 seedWriteId,
+}
+struct SeedTxnIdRequest {
+ 1: required i64 seedTxnId,
+}
+
struct LockComponent {
1: required LockType type,
2: required LockLevel level,
@@ -948,11 +1247,15 @@ struct LockRequest {
3: required string user, // used in 'show locks' to help admins find who has open locks
4: required string hostname, // used in 'show locks' to help admins find who has open locks
5: optional string agentInfo = "Unknown",
+ 6: optional bool zeroWaitReadEnabled = false,
+ 7: optional bool exclusiveCTAS = false,
+ 8: optional bool locklessReadsEnabled = false
}
struct LockResponse {
1: required i64 lockid,
2: required LockState state,
+ 3: optional string errorMessage
}
struct CheckLockRequest {
@@ -970,6 +1273,7 @@ struct ShowLocksRequest {
2: optional string tablename,
3: optional string partname,
4: optional bool isExtended=false,
+ 5: optional i64 txnid,
}
struct ShowLocksResponseElement {
@@ -1011,21 +1315,89 @@ struct HeartbeatTxnRangeResponse {
}
struct CompactionRequest {
- 1: required string dbname,
- 2: required string tablename,
- 3: optional string partitionname,
- 4: required CompactionType type,
- 5: optional string runas,
+ 1: required string dbname
+ 2: required string tablename
+ 3: optional string partitionname
+ 4: required CompactionType type
+ 5: optional string runas
6: optional map<string, string> properties
+ 7: optional string initiatorId
+ 8: optional string initiatorVersion
+ 9: optional string poolName
+ 10: optional i32 numberOfBuckets
+ 11: optional string orderByClause;
+}
+
+struct CompactionInfoStruct {
+ 1: required i64 id,
+ 2: required string dbname,
+ 3: required string tablename,
+ 4: optional string partitionname,
+ 5: required CompactionType type,
+ 6: optional string runas,
+ 7: optional string properties,
+ 8: optional bool toomanyaborts,
+ 9: optional string state,
+ 10: optional string workerId,
+ 11: optional i64 start,
+ 12: optional i64 highestWriteId,
+ 13: optional string errorMessage,
+ 14: optional bool hasoldabort,
+ 15: optional i64 enqueueTime,
+ 16: optional i64 retryRetention,
+ 17: optional string poolname
+ 18: optional i32 numberOfBuckets
+ 19: optional string orderByClause;
+}
+
+struct OptionalCompactionInfoStruct {
+ 1: optional CompactionInfoStruct ci,
+}
+
+enum CompactionMetricsMetricType {
+ NUM_OBSOLETE_DELTAS,
+ NUM_DELTAS,
+ NUM_SMALL_DELTAS,
+}
+
+struct CompactionMetricsDataStruct {
+ 1: required string dbname
+ 2: required string tblname
+ 3: optional string partitionname
+ 4: required CompactionMetricsMetricType type
+ 5: required i32 metricvalue
+ 6: required i32 version
+ 7: required i32 threshold
+}
+
+struct CompactionMetricsDataResponse {
+ 1: optional CompactionMetricsDataStruct data
+}
+
+struct CompactionMetricsDataRequest {
+ 1: required string dbName,
+ 2: required string tblName,
+ 3: optional string partitionName
+ 4: required CompactionMetricsMetricType type
}
struct CompactionResponse {
1: required i64 id,
2: required string state,
- 3: required bool accepted
+ 3: required bool accepted,
+ 4: optional string errormessage
}
struct ShowCompactRequest {
+ 1: optional i64 id,
+ 2: optional string poolName,
+ 3: optional string dbName,
+ 4: optional string tbName,
+ 5: optional string partName,
+ 6: optional CompactionType type,
+ 7: optional string state,
+ 8: optional i64 limit,
+ 9: optional string order
}
struct ShowCompactResponseElement {
@@ -1042,12 +1414,57 @@ struct ShowCompactResponseElement {
11: optional i64 endTime,
12: optional string hadoopJobId = "None",
13: optional i64 id,
+ 14: optional string errorMessage,
+ 15: optional i64 enqueueTime,
+ 16: optional string workerVersion,
+ 17: optional string initiatorId,
+ 18: optional string initiatorVersion,
+ 19: optional i64 cleanerStart,
+ 20: optional string poolName,
+ 21: optional i64 nextTxnId,
+ 22: optional i64 txnId,
+ 23: optional i64 commitTime,
+ 24: optional i64 hightestWriteId
}
struct ShowCompactResponse {
1: required list<ShowCompactResponseElement> compacts,
}
+struct AbortCompactionRequest {
+ 1: required list<i64> compactionIds,
+ 2: optional string type,
+ 3: optional string poolName
+}
+
+struct AbortCompactionResponseElement {
+ 1: required i64 compactionId,
+ 2: optional string status,
+ 3: optional string message
+}
+
+struct AbortCompactResponse {
+ 1: required map<i64, AbortCompactionResponseElement> abortedcompacts,
+}
+
+struct GetLatestCommittedCompactionInfoRequest {
+ 1: required string dbname,
+ 2: required string tablename,
+ 3: optional list<string> partitionnames,
+ 4: optional i64 lastCompactionId,
+}
+
+struct GetLatestCommittedCompactionInfoResponse {
+ 1: required list<CompactionInfoStruct> compactions,
+}
+
+struct FindNextCompactRequest {
+ 1: optional string workerId,
+ 2: optional string workerVersion,
+ 3: optional string poolName
+}
+
struct AddDynamicPartitions {
1: required i64 txnid,
2: required i64 writeid,
@@ -1066,18 +1483,14 @@ struct BasicTxnInfo {
6: optional string partitionname
}
-struct CreationMetadata {
- 1: required string catName
- 2: required string dbName,
- 3: required string tblName,
- 4: required set<string> tablesUsed,
- 5: optional string validTxnList,
- 6: optional i64 materializationTime
-}
struct NotificationEventRequest {
1: required i64 lastEvent,
2: optional i32 maxEvents,
+ 3: optional list<string> eventTypeSkipList,
+ 4: optional string catName,
+ 5: optional string dbName,
+ 6: optional list<string> tableNames
}
struct NotificationEvent {
@@ -1102,7 +1515,10 @@ struct CurrentNotificationEventId {
struct NotificationEventsCountRequest {
1: required i64 fromEventId,
2: required string dbName,
- 3: optional string catName
+ 3: optional string catName,
+ 4: optional i64 toEventId,
+ 5: optional i64 limit,
+ 6: optional list<string> tableNames
}
struct NotificationEventsCountResponse {
@@ -1114,10 +1530,18 @@ struct InsertEventRequestData {
2: required list<string> filesAdded,
// Checksum of files (hex string of checksum byte payload)
3: optional list<string> filesAddedChecksum,
+ // Used by ACID operations to create the sub-directory
+ 4: optional list<string> subDirectoryList,
+ // partition value which was inserted (used in case of bulk insert events)
+ 5: optional list<string> partitionVal
}
union FireEventRequestData {
- 1: InsertEventRequestData insertData
+ 1: optional InsertEventRequestData insertData,
+ // used to fire insert events on multiple partitions
+ 2: optional list<InsertEventRequestData> insertDatas,
+ // Identify if it is a refresh or invalidate event
+ 3: optional bool refreshEvent
}
struct FireEventRequest {
@@ -1127,11 +1551,37 @@ struct FireEventRequest {
// subevent as I assume they'll be used across most event types.
3: optional string dbName,
4: optional string tableName,
+ // ignored if event request data contains multiple insert event datas
5: optional list<string> partitionVals,
6: optional string catName,
+ 7: optional map<string, string> tblParams,
}
struct FireEventResponse {
+ 1: list<i64> eventIds
+}
+
+struct WriteNotificationLogRequest {
+ 1: required i64 txnId,
+ 2: required i64 writeId,
+ 3: required string db,
+ 4: required string table,
+ 5: required InsertEventRequestData fileInfo,
+ 6: optional list<string> partitionVals,
+}
+
+struct WriteNotificationLogResponse {
+ // NOP for now, this is just a place holder for future responses
+}
+
+struct WriteNotificationLogBatchRequest {
+ 1: required string catalog,
+ 2: required string db,
+ 3: required string table,
+ 4: required list<WriteNotificationLogRequest> requestList,
+}
+
+struct WriteNotificationLogBatchResponse {
// NOP for now, this is just a place holder for future responses
}
@@ -1212,47 +1662,127 @@ enum ClientCapability {
INSERT_ONLY_TABLES = 2
}
-
struct ClientCapabilities {
1: required list<ClientCapability> values
}
+/*
+ * Generic request API, providing different kinds of filtering and controlling output.
+ *
+ * The API entry points are get_partitions_with_specs() and getTables, which are based on a single
+ * request/response object model.
+ *
+ * The request defines any filtering that should be done for partitions as well as the list of fields that should be
+ * returned (this is called ProjectionSpec). Projection is simply a list of dot separated strings which represent
+ * the fields which that be returned. Projection may also include whitelist or blacklist of parameters to include in
+ * the partition. When both blacklist and whitelist are present, the blacklist supersedes the
+ * whitelist in case of conflicts.
+ *
+ * Filter spec is the generalization of various types of partition and table filtering. Partitions and tables can be
+ * filtered by names, by values or by partition expressions.
+ */
+
+struct GetProjectionsSpec {
+ // fieldList is a list of dot separated strings which represent the fields which must be returned.
+ // Any other field which is not in the fieldList may be unset in the returned partitions (it
+ // is up to the implementation to decide whether it chooses to include or exclude such fields).
+ // E.g. setting the field list to sd.location, serdeInfo.name, sd.cols.name, sd.cols.type will
+ // return partitions which will have location field set in the storage descriptor. Also the serdeInfo
+ // in the returned storage descriptor will only have name field set. This applies to multi-valued
+ // fields as well like sd.cols, so in the example above only name and type fields will be set for sd.cols.
+ // If the fieldList is empty or not present, all the fields will be set
+ 1: list<string> fieldList;
+ // SQL-92 compliant regex pattern for param keys to be included
+ // _ or % wildcards are supported. '_' represents one character and '%' represents 0 or more characters
+ // Currently this is unsupported when fetching tables.
+ 2: string includeParamKeyPattern;
+ // SQL-92 compliant regex pattern for param keys to be excluded
+ // _ or % wildcards are supported. '_' represents one character and '%' represents 0 or more characters
+ // Currently this is unsupported when fetching tables.
+ 3: string excludeParamKeyPattern;
+}
+
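
To make the projection model concrete, here is a sketch that builds the fieldList from the example in the comment above using the generated Java class (setters follow the usual Thrift setX naming):

    import io.trino.hive.thrift.metastore.GetProjectionsSpec;

    import java.util.List;

    public final class ProjectionSpecExample
    {
        private ProjectionSpecExample() {}

        // Request only the partition location, serde name, and column names/types;
        // the server may leave every other field unset
        public static GetProjectionsSpec locationAndColumns()
        {
            GetProjectionsSpec spec = new GetProjectionsSpec();
            spec.setFieldList(List.of("sd.location", "serdeInfo.name", "sd.cols.name", "sd.cols.type"));
            return spec;
        }
    }
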
struct GetTableRequest {
1: required string dbName,
2: required string tblName,
3: optional ClientCapabilities capabilities,
- 4: optional string catName
+ 4: optional string catName,
+ 6: optional string validWriteIdList,
+ 7: optional bool getColumnStats,
+ 8: optional list<string> processorCapabilities,
+ 9: optional string processorIdentifier,
+ 10: optional string engine = "hive",
+ 11: optional i64 id=-1 // table id
}
struct GetTableResult {
- 1: required Table table
+ 1: required Table table,
+ 2: optional bool isStatsCompliant
}
struct GetTablesRequest {
1: required string dbName,
2: optional list<string> tblNames,
3: optional ClientCapabilities capabilities,
- 4: optional string catName
+ 4: optional string catName,
+ 5: optional list<string> processorCapabilities,
+ 6: optional string processorIdentifier,
+ 7: optional GetProjectionsSpec projectionSpec,
+ 8: optional string tablesPattern
}
struct GetTablesResult {
1: required list<Table> tables
}
-struct AlterTableRequest {
- 1: optional string catName,
- 2: required string dbName,
- 3: required string tableName,
- 4: required Table table,
- 5: optional EnvironmentContext environmentContext,
- 6: optional i64 writeId=-1,
- 7: optional string validWriteIdList
- 8: optional list<string> processorCapabilities,
- 9: optional string processorIdentifier
-// TODO: also add cascade here, out of envCtx
+struct GetTablesExtRequest {
+ 1: required string catalog,
+ 2: required string database,
+ 3: required string tableNamePattern, // table name matching pattern
+ 4: required i32 requestedFields, // ORed GetTablesExtRequestFields
+ 5: optional i32 limit, // maximum number of tables returned (0=all)
+ 6: optional list<string> processorCapabilities, // list of capabilities "possessed" by the client
+ 7: optional string processorIdentifier
}
-struct AlterTableResponse {
+// response to GetTablesExtRequest call
+struct ExtendedTableInfo {
+ 1: required string tblName, // always returned
+ 2: optional i32 accessType, // if AccessType set
+ 3: optional list<string> requiredReadCapabilities // capabilities required for read access
+ 4: optional list<string> requiredWriteCapabilities // capabilities required for write access
+}
+
+struct DropTableRequest {
+ 1: optional string catalogName,
+ 2: required string dbName,
+ 3: required string tableName,
+ 4: optional bool deleteData,
+ 5: optional EnvironmentContext envContext,
+ 6: optional bool dropPartitions
+}
+
+struct GetDatabaseRequest {
+ 1: optional string name,
+ 2: optional string catalogName,
+ 3: optional list<string> processorCapabilities,
+ 4: optional string processorIdentifier
+}
+
+struct AlterDatabaseRequest {
+ 1: required string oldDbName,
+ 2: required Database newDb
+}
+
+struct DropDatabaseRequest {
+ 1: required string name,
+ 2: optional string catalogName,
+ 3: required bool ignoreUnknownDb,
+ 4: required bool deleteData,
+ 5: required bool cascade,
+ 6: optional bool softDelete=false,
+ 7: optional i64 txnId=0,
+ 8: optional bool deleteManagedDir=true
}
// Request type for cm_recycle
@@ -1271,10 +1801,13 @@ struct TableMeta {
3: required string tableType;
4: optional string comments;
5: optional string catName;
+ 6: optional string ownerName;
+ 7: optional PrincipalType ownerType;
}
struct Materialization {
1: required bool sourceTablesUpdateDeleteModified;
+ 2: required bool sourceTablesCompacted;
}
// Data types for workload management.
@@ -1295,6 +1828,7 @@ struct WMResourcePlan {
2: optional WMResourcePlanStatus status;
3: optional i32 queryParallelism;
4: optional string defaultPoolPath;
+ 5: optional string ns;
}
struct WMNullableResourcePlan {
@@ -1304,6 +1838,7 @@ struct WMNullableResourcePlan {
5: optional bool isSetQueryParallelism;
6: optional string defaultPoolPath;
7: optional bool isSetDefaultPoolPath;
+ 8: optional string ns;
}
struct WMPool {
@@ -1312,6 +1847,7 @@ struct WMPool {
3: optional double allocFraction;
4: optional i32 queryParallelism;
5: optional string schedulingPolicy;
+ 6: optional string ns;
}
@@ -1322,6 +1858,7 @@ struct WMNullablePool {
4: optional i32 queryParallelism;
5: optional string schedulingPolicy;
6: optional bool isSetSchedulingPolicy;
+ 7: optional string ns;
}
struct WMTrigger {
@@ -1330,6 +1867,7 @@ struct WMTrigger {
3: optional string triggerExpression;
4: optional string actionExpression;
5: optional bool isInUnmanaged;
+ 6: optional string ns;
}
struct WMMapping {
@@ -1338,11 +1876,13 @@ struct WMMapping {
3: required string entityName;
4: optional string poolPath;
5: optional i32 ordering;
+ 6: optional string ns;
}
struct WMPoolTrigger {
1: required string pool;
2: required string trigger;
+ 3: optional string ns;
}
struct WMFullResourcePlan {
@@ -1364,6 +1904,7 @@ struct WMCreateResourcePlanResponse {
}
struct WMGetActiveResourcePlanRequest {
+ 1: optional string ns;
}
struct WMGetActiveResourcePlanResponse {
@@ -1372,6 +1913,7 @@ struct WMGetActiveResourcePlanResponse {
struct WMGetResourcePlanRequest {
1: optional string resourcePlanName;
+ 2: optional string ns;
}
struct WMGetResourcePlanResponse {
@@ -1379,6 +1921,7 @@ struct WMGetResourcePlanResponse {
}
struct WMGetAllResourcePlanRequest {
+ 1: optional string ns;
}
struct WMGetAllResourcePlanResponse {
@@ -1391,6 +1934,7 @@ struct WMAlterResourcePlanRequest {
3: optional bool isEnableAndActivate;
4: optional bool isForceDeactivate;
5: optional bool isReplace;
+ 6: optional string ns;
}
struct WMAlterResourcePlanResponse {
@@ -1399,6 +1943,7 @@ struct WMAlterResourcePlanResponse {
struct WMValidateResourcePlanRequest {
1: optional string resourcePlanName;
+ 2: optional string ns;
}
struct WMValidateResourcePlanResponse {
@@ -1408,6 +1953,7 @@ struct WMValidateResourcePlanResponse {
struct WMDropResourcePlanRequest {
1: optional string resourcePlanName;
+ 2: optional string ns;
}
struct WMDropResourcePlanResponse {
@@ -1430,6 +1976,7 @@ struct WMAlterTriggerResponse {
struct WMDropTriggerRequest {
1: optional string resourcePlanName;
2: optional string triggerName;
+ 3: optional string ns;
}
struct WMDropTriggerResponse {
@@ -1437,6 +1984,7 @@ struct WMDropTriggerResponse {
struct WMGetTriggersForResourePlanRequest {
1: optional string resourcePlanName;
+ 2: optional string ns;
}
struct WMGetTriggersForResourePlanResponse {
@@ -1461,6 +2009,7 @@ struct WMAlterPoolResponse {
struct WMDropPoolRequest {
1: optional string resourcePlanName;
2: optional string poolPath;
+ 3: optional string ns;
}
struct WMDropPoolResponse {
@@ -1486,6 +2035,7 @@ struct WMCreateOrDropTriggerToPoolMappingRequest {
2: optional string triggerName;
3: optional string poolPath;
4: optional bool drop;
+ 5: optional string ns;
}
struct WMCreateOrDropTriggerToPoolMappingResponse {
@@ -1569,6 +2119,374 @@ struct GetRuntimeStatsRequest {
2: required i32 maxCreateTime
}
+struct CreateTableRequest {
+ 1: required Table table,
+ 2: optional EnvironmentContext envContext,
+ 3: optional list<SQLPrimaryKey> primaryKeys,
+ 4: optional list<SQLForeignKey> foreignKeys,
+ 5: optional list<SQLUniqueConstraint> uniqueConstraints,
+ 6: optional list<SQLNotNullConstraint> notNullConstraints,
+ 7: optional list<SQLDefaultConstraint> defaultConstraints,
+ 8: optional list<SQLCheckConstraint> checkConstraints,
+ 9: optional list<string> processorCapabilities,
+ 10: optional string processorIdentifier
+}
+
+struct CreateDatabaseRequest {
+ 1: required string databaseName,
+ 2: optional string description,
+ 3: optional string locationUri,
+ 4: optional map<string, string> parameters,
+ 5: optional PrincipalPrivilegeSet privileges,
+ 6: optional string ownerName,
+ 7: optional PrincipalType ownerType,
+ 8: optional string catalogName,
+ 9: optional i32 createTime,
+ 10: optional string managedLocationUri,
+ 11: optional DatabaseType type,
+ 12: optional string dataConnectorName,
+ 13: optional string remote_dbname
+}
+
+struct CreateDataConnectorRequest {
+ 1: required DataConnector connector
+}
+
+struct GetDataConnectorRequest {
+ 1: required string connectorName
+}
+
+struct AlterDataConnectorRequest {
+ 1: required string connectorName,
+ 2: required DataConnector newConnector
+}
+
+struct DropDataConnectorRequest {
+ 1: required string connectorName,
+ 2: optional bool ifNotExists,
+ 3: optional bool checkReferences
+}
+
+struct ScheduledQueryPollRequest {
+ 1: required string clusterNamespace
+}
+
+struct ScheduledQueryKey {
+ 1: required string scheduleName,
+ 2: required string clusterNamespace,
+}
+
+struct ScheduledQueryPollResponse {
+ 1: optional ScheduledQueryKey scheduleKey,
+ 2: optional i64 executionId,
+ 3: optional string query,
+ 4: optional string user,
+}
+
+struct ScheduledQuery {
+ 1: required ScheduledQueryKey scheduleKey,
+ 2: optional bool enabled,
+ 4: optional string schedule,
+ 5: optional string user,
+ 6: optional string query,
+ 7: optional i32 nextExecution,
+}
+
+enum ScheduledQueryMaintenanceRequestType {
+ CREATE = 1,
+ ALTER = 2,
+ DROP = 3,
+}
+
+struct ScheduledQueryMaintenanceRequest {
+ 1: required ScheduledQueryMaintenanceRequestType type,
+ 2: required ScheduledQuery scheduledQuery,
+}
+
+enum QueryState {
+ INITED,
+ EXECUTING,
+ FAILED,
+ FINISHED,
+ TIMED_OUT,
+ AUTO_DISABLED,
+}
+
+struct ScheduledQueryProgressInfo{
+ 1: required i64 scheduledExecutionId,
+ 2: required QueryState state,
+ 3: required string executorQueryId,
+ 4: optional string errorMessage,
+}
+
+struct AlterPartitionsRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tableName,
+ 4: required list<Partition> partitions,
+ 5: optional EnvironmentContext environmentContext,
+ 6: optional i64 writeId=-1,
+ 7: optional string validWriteIdList,
+ 8: optional bool skipColumnSchemaForPartition,
+ 9: optional list<FieldSchema> partitionColSchema
+}
+
+struct AppendPartitionsRequest {
+ 1: optional string catalogName,
+ 2: required string dbName,
+ 3: required string tableName,
+ 4: optional string name,
+ 5: optional list<string> partVals,
+ 6: optional EnvironmentContext environmentContext
+}
+
+struct AlterPartitionsResponse {
+}
+
+struct RenamePartitionRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tableName,
+ 4: required list<string> partVals,
+ 5: required Partition newPart,
+ 6: optional string validWriteIdList,
+ 7: optional i64 txnId, // txnId associated with the rename operation
+ 8: optional bool clonePart // non-blocking rename
+}
+
+struct RenamePartitionResponse {
+}
+
+struct AlterTableRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tableName,
+ 4: required Table table,
+ 5: optional EnvironmentContext environmentContext,
+ 6: optional i64 writeId=-1,
+ 7: optional string validWriteIdList
+ 8: optional list<string> processorCapabilities,
+ 9: optional string processorIdentifier,
+ 10: optional string expectedParameterKey,
+ 11: optional string expectedParameterValue
+// TODO: also add cascade here, out of envCtx
+}
+
+struct AlterTableResponse {
+}
+
+enum PartitionFilterMode {
+ BY_NAMES, // filter by names
+ BY_VALUES, // filter by values
+ BY_EXPR // filter by expression
+}
+
+struct GetPartitionsFilterSpec {
+ 7: optional PartitionFilterMode filterMode,
+ 8: optional list<string> filters //used as list of partitionNames or list of values or expressions depending on mode
+}
+
+struct GetPartitionsResponse {
+ 1: list<PartitionSpec> partitionSpec
+}
+
+struct GetPartitionsRequest {
+ 1: optional string catName,
+ 2: string dbName,
+ 3: string tblName,
+ 4: optional bool withAuth,
+ 5: optional string user,
+ 6: optional list<string> groupNames,
+ 7: GetProjectionsSpec projectionSpec
+ 8: GetPartitionsFilterSpec filterSpec,
+ 9: optional list<string> processorCapabilities,
+ 10: optional string processorIdentifier,
+ 11: optional string validWriteIdList
+}
+
+struct GetFieldsRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: optional EnvironmentContext envContext,
+ 5: optional string validWriteIdList,
+ 6: optional i64 id=-1 // table id
+}
+
+struct GetFieldsResponse {
+ 1: required list<FieldSchema> fields
+}
+
+struct GetSchemaRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: optional EnvironmentContext envContext,
+ 5: optional string validWriteIdList,
+ 6: optional i64 id=-1 // table id
+}
+
+struct GetSchemaResponse {
+ 1: required list<FieldSchema> fields
+}
+
+struct GetPartitionRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: required list<string> partVals,
+ 5: optional string validWriteIdList,
+ 6: optional i64 id=-1 // table id
+}
+
+struct GetPartitionResponse {
+ 1: required Partition partition
+}
+
+struct PartitionsRequest { // Not using Get prefix as that name is already used for a different method
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: optional i16 maxParts=-1,
+ 5: optional string validWriteIdList,
+ 6: optional i64 id=-1, // table id
+ 7: optional bool skipColumnSchemaForPartition,
+ 8: optional string includeParamKeyPattern,
+ 9: optional string excludeParamKeyPattern
+}
+
+struct PartitionsResponse { // Not using Get prefix as that name is already used for a different method
+ 1: required list<Partition> partitions
+}
+
+struct GetPartitionsByFilterRequest {
+ 1: optional string catName,
+ 2: string dbName,
+ 3: string tblName,
+ 4: string filter,
+ 5: optional i16 maxParts=-1,
+ 6: optional bool skipColumnSchemaForPartition,
+ 7: optional string includeParamKeyPattern,
+ 8: optional string excludeParamKeyPattern
+}
+
+struct GetPartitionNamesPsRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: optional list<string> partValues,
+ 5: optional i16 maxParts=-1,
+ 6: optional string validWriteIdList,
+ 7: optional i64 id=-1 // table id
+}
+
+struct GetPartitionNamesPsResponse {
+ 1: required list<string> names
+}
+
+struct GetPartitionsPsWithAuthRequest {
+ 1: optional string catName,
+ 2: required string dbName,
+ 3: required string tblName,
+ 4: optional list<string> partVals,
+ 5: optional i16 maxParts=-1,
+ 6: optional string userName,
+ 7: optional list<string> groupNames,
+ 8: optional string validWriteIdList,
+ 9: optional i64 id=-1 // table id
+ 10: optional bool skipColumnSchemaForPartition,
+ 11: optional string includeParamKeyPattern,
+ 12: optional string excludeParamKeyPattern,
+ 13: optional list<string> partNames;
+}
+
+struct GetPartitionsPsWithAuthResponse {
+ 1: required list<Partition> partitions
+}
+
+struct ReplicationMetrics{
+ 1: required i64 scheduledExecutionId,
+ 2: required string policy,
+ 3: required i64 dumpExecutionId,
+ 4: optional string metadata,
+ 5: optional string progress,
+ 6: optional string messageFormat
+}
+
+struct ReplicationMetricList{
+ 1: required list<ReplicationMetrics> replicationMetricList,
+}
+
+struct GetReplicationMetricsRequest {
+ 1: optional i64 scheduledExecutionId,
+ 2: optional string policy,
+ 3: optional i64 dumpExecutionId
+}
+
+struct GetOpenTxnsRequest {
+ 1: optional list<TxnType> excludeTxnTypes;
+}
+
+struct StoredProcedureRequest {
+ 1: required string catName,
+ 2: required string dbName,
+ 3: required string procName
+}
+
+struct ListStoredProcedureRequest {
+ 1: required string catName
+ 2: optional string dbName
+}
+
+struct StoredProcedure {
+ 1: string name,
+ 2: string dbName,
+ 3: string catName,
+ 4: string ownerName,
+ 5: string source
+}
+
+struct AddPackageRequest {
+ 1: string catName,
+ 2: string dbName,
+ 3: string packageName
+ 4: string ownerName,
+ 5: string header,
+ 6: string body
+}
+
+struct GetPackageRequest {
+ 1: required string catName,
+ 2: required string dbName,
+ 3: required string packageName
+}
+
+struct DropPackageRequest {
+ 1: required string catName,
+ 2: required string dbName,
+ 3: required string packageName
+}
+
+struct ListPackageRequest {
+ 1: required string catName
+ 2: optional string dbName
+}
+
+struct Package {
+ 1: string catName,
+ 2: string dbName,
+ 3: string packageName
+ 4: string ownerName,
+ 5: string header,
+ 6: string body
+}
+
+struct GetAllWriteEventInfoRequest {
+ 1: required i64 txnId,
+ 2: optional string dbName,
+ 3: optional string tableName
+}
+
// Exceptions.
exception MetaException {
@@ -1632,11 +2550,19 @@ exception NoSuchLockException {
1: string message
}
+exception CompactionAbortedException {
+ 1: string message
+}
+
+exception NoSuchCompactionException {
+ 1: string message
+}
/**
* This interface is live.
*/
service ThriftHiveMetastore
{
+ AbortCompactResponse abort_Compactions(1: AbortCompactionRequest rqst)
string getMetaConf(1:string key) throws(1:MetaException o1)
void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)
@@ -1647,13 +2573,23 @@ service ThriftHiveMetastore
void drop_catalog(1: DropCatalogRequest catName) throws (1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+ void create_database_req(1:CreateDatabaseRequest createDatabaseRequest) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ Database get_database_req(1:GetDatabaseRequest request) throws(1:NoSuchObjectException o1, 2:MetaException o2)
void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ void drop_database_req(1:DropDatabaseRequest req) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
list<string> get_databases(1:string pattern) throws(1:MetaException o1)
list<string> get_all_databases() throws(1:MetaException o1)
void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ void alter_database_req(1:AlterDatabaseRequest alterDbReq) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+ void create_dataconnector_req(1:CreateDataConnectorRequest connectorReq) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+ DataConnector get_dataconnector_req(1:GetDataConnectorRequest request) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ void drop_dataconnector_req(1:DropDataConnectorRequest dropDcReq) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ list<string> get_dataconnectors() throws(1:MetaException o1)
+ void alter_dataconnector_req(1:AlterDataConnectorRequest alterReq) throws(1:MetaException o1, 2:NoSuchObjectException o2)
- // returns the type with given name (make seperate calls for the dependent types if needed)
+ // returns the type with given name (make separate calls for the dependent types if needed)
Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
@@ -1661,12 +2597,18 @@ service ThriftHiveMetastore
throws(1:MetaException o2)
// Gets a list of FieldSchemas describing the columns of a particular table
- list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
- list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+ list<FieldSchema> get_fields(1:string db_name, 2:string table_name) throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
+ list<FieldSchema> get_fields_with_environment_context(1:string db_name, 2:string table_name, 3:EnvironmentContext environment_context) throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
+
+ GetFieldsResponse get_fields_req(1: GetFieldsRequest req)
+ throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
// Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
- list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
- list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+ list<FieldSchema> get_schema(1:string db_name, 2:string table_name) throws(1:MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+ list<FieldSchema> get_schema_with_environment_context(1:string db_name, 2:string table_name, 3:EnvironmentContext environment_context) throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
+
+ GetSchemaResponse get_schema_req(1: GetSchemaRequest req)
+ throws(1:MetaException o1, 2:UnknownTableException o2, 3:UnknownDBException o3)
// create a Hive table. Following fields must be set
// tableName
@@ -1684,11 +2626,14 @@ service ThriftHiveMetastore
2:InvalidObjectException o2, 3:MetaException o3,
4:NoSuchObjectException o4)
void create_table_with_constraints(1:Table tbl, 2: list<SQLPrimaryKey> primaryKeys, 3: list<SQLForeignKey> foreignKeys,
- 4: list<SQLUniqueConstraint> uniqueConstraints, 5: list<SQLNotNullConstraint> notNullConstraints,
- 6: list<SQLDefaultConstraint> defaultConstraints, 7: list<SQLCheckConstraint> checkConstraints)
+ 4: list<SQLUniqueConstraint> uniqueConstraints, 5: list<SQLNotNullConstraint> notNullConstraints,
+ 6: list<SQLDefaultConstraint> defaultConstraints, 7: list<SQLCheckConstraint> checkConstraints)
throws (1:AlreadyExistsException o1,
2:InvalidObjectException o2, 3:MetaException o3,
4:NoSuchObjectException o4)
+ void create_table_req(1:CreateTableRequest request) throws (1:AlreadyExistsException o1,
+ 2:InvalidObjectException o2, 3:MetaException o3,
+ 4:NoSuchObjectException o4)
void drop_constraint(1:DropConstraintRequest req)
throws(1:NoSuchObjectException o1, 2:MetaException o3)
void add_primary_key(1:AddPrimaryKeyRequest req)
@@ -1703,7 +2648,8 @@ service ThriftHiveMetastore
throws(1:NoSuchObjectException o1, 2:MetaException o2)
void add_check_constraint(1:AddCheckConstraintRequest req)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
-
+ Table translate_table_dryrun(1:CreateTableRequest request)
+ throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
// drops the table and all the partitions associated with it if the table has partitions
// delete data (including partitions) if deleteData is set to true
void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
@@ -1711,18 +2657,20 @@ service ThriftHiveMetastore
void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
4:EnvironmentContext environment_context)
throws(1:NoSuchObjectException o1, 2:MetaException o3)
+ void drop_table_req(1:DropTableRequest dropTableReq)
+ throws(1:NoSuchObjectException o1, 2:MetaException o3)
void truncate_table(1:string dbName, 2:string tableName, 3:list<string> partNames)
throws(1:MetaException o1)
+ TruncateTableResponse truncate_table_req(1:TruncateTableRequest req) throws(1:MetaException o1)
list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
list<string> get_tables_by_type(1: string db_name, 2: string pattern, 3: string tableType) throws (1: MetaException o1)
+ list<Table> get_all_materialized_view_objects_for_rewriting() throws (1:MetaException o1)
list<string> get_materialized_views_for_rewriting(1: string db_name) throws (1: MetaException o1)
list<TableMeta> get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list<string> tbl_types)
throws (1: MetaException o1)
list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
- Table get_table(1:string dbname, 2:string tbl_name)
- throws (1:MetaException o1, 2:NoSuchObjectException o2)
- list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
+ list<ExtendedTableInfo> get_tables_ext(1: GetTablesExtRequest req) throws (1: MetaException o1)
GetTableResult get_table_req(1:GetTableRequest req) throws (1:MetaException o1, 2:NoSuchObjectException o2)
GetTablesResult get_table_objects_by_name_req(1:GetTablesRequest req)
throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
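
get_table_req and get_table_objects_by_name_req replace the removed positional calls, and the result now arrives wrapped in GetTableResult/GetTablesResult. A sketch of the new lookup path, assuming GetTableRequest keeps the dbName and tblName fields it declares upstream:

    import io.trino.hive.thrift.metastore.GetTableRequest;
    import io.trino.hive.thrift.metastore.Table;
    import io.trino.hive.thrift.metastore.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    class TableLookup {
        // Replacement for the removed get_table(dbname, tbl_name).
        static Table fetch(ThriftHiveMetastore.Client client, String db, String table) throws TException {
            GetTableRequest req = new GetTableRequest();
            req.setDbName(db);
            req.setTblName(table);
            return client.get_table_req(req).getTable(); // unwrap GetTableResult
        }
    }
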
@@ -1779,7 +2727,9 @@ service ThriftHiveMetastore
void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade)
throws (1:InvalidOperationException o1, 2:MetaException o2)
AlterTableResponse alter_table_req(1:AlterTableRequest req)
- throws (1:InvalidOperationException o1, 2:MetaException o2)
+ throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+
// the following applies to only tables that have partitions
// * See notes on DDL_TIME
@@ -1800,6 +2750,8 @@ service ThriftHiveMetastore
Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name,
3:list<string> part_vals, 4:EnvironmentContext environment_context)
throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+ Partition append_partition_req(1:AppendPartitionsRequest appendPartitionsReq)
+ throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
@@ -1810,6 +2762,8 @@ service ThriftHiveMetastore
bool drop_partition_with_environment_context(1:string db_name, 2:string tbl_name,
3:list<string> part_vals, 4:bool deleteData, 5:EnvironmentContext environment_context)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ bool drop_partition_req(1:DropPartitionRequest dropPartitionReq)
+ throws(1:NoSuchObjectException o1, 2:MetaException o2)
bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
bool drop_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
@@ -1820,6 +2774,8 @@ service ThriftHiveMetastore
Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ GetPartitionResponse get_partition_req(1: GetPartitionRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db,
3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
@@ -1840,6 +2796,8 @@ service ThriftHiveMetastore
// If max parts is given then it will return only that many.
list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ PartitionsResponse get_partitions_req(1:PartitionsRequest req)
+ throws(1:NoSuchObjectException o1, 2:MetaException o2)
list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
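
get_partitions_req, added just above, is the request-struct twin of get_partitions. A sketch of its use, assuming PartitionsRequest keeps its upstream required dbName/tblName fields (covered by the generated constructor) and the declared maxParts default of -1, meaning no limit:

    import java.util.List;
    import io.trino.hive.thrift.metastore.Partition;
    import io.trino.hive.thrift.metastore.PartitionsRequest;
    import io.trino.hive.thrift.metastore.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    class PartitionScan {
        // Fetch every partition of a table via the request-struct API.
        static List<Partition> all(ThriftHiveMetastore.Client client, String db, String table) throws TException {
            PartitionsRequest req = new PartitionsRequest(db, table); // maxParts stays -1 (no limit)
            return client.get_partitions_req(req).getPartitions();
        }
    }
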
@@ -1848,7 +2806,8 @@ service ThriftHiveMetastore
list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
throws(1:NoSuchObjectException o1, 2:MetaException o2)
-
+ list<string> fetch_partition_names_req(1:PartitionsRequest partitionReq)
+ throws(1:NoSuchObjectException o1, 2:MetaException o2)
PartitionValuesResponse get_partition_values(1:PartitionValuesRequest request)
throws(1:MetaException o1, 2:NoSuchObjectException o2);
@@ -1861,18 +2820,29 @@ service ThriftHiveMetastore
list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name
3:list<string> part_vals, 4:i16 max_parts=-1)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
- list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
+ list<Partition> get_partitions_ps_with_auth(1:string db_name,
+ 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(1:GetPartitionsPsWithAuthRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
list<string> get_partition_names_ps(1:string db_name,
2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ GetPartitionNamesPsResponse get_partition_names_ps_req(1:GetPartitionNamesPsRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+ list<string> get_partition_names_req(1:PartitionsByExprRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
// get the partitions matching the given partition filter
list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
3:string filter, 4:i16 max_parts=-1)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ list<Partition> get_partitions_by_filter_req(1:GetPartitionsByFilterRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
// List partitions as PartitionSpec instances.
list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name
3:string filter, 4:i32 max_parts=-1)
@@ -1885,12 +2855,25 @@ service ThriftHiveMetastore
throws(1:MetaException o1, 2:NoSuchObjectException o2)
// get the partitions matching the given partition filter
+ // unlike get_partitions_by_expr, this returns PartitionSpec, which contains a deduplicated
+ // storage descriptor
+ PartitionsSpecByExprResult get_partitions_spec_by_expr(1:PartitionsByExprRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+ // get the partitions matching the given partition filter
i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
// get partitions give a list of partition names
list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
- throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3)
+ GetPartitionsByNamesResult get_partitions_by_names_req(1:GetPartitionsByNamesRequest req)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3)
+
+ // retrieve properties
+ PropertyGetResponse get_properties(1:PropertyGetRequest req) throws(1:MetaException e1, 2:NoSuchObjectException e2);
+ // set properties
+ bool set_properties(1:PropertySetRequest req) throws(1:MetaException e1, 2:NoSuchObjectException e2);
// changes the partition to the new partition object. partition is identified from the part values
// in the new_part
@@ -1902,22 +2885,26 @@ service ThriftHiveMetastore
// prehooks are fired together followed by all post hooks
void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
throws (1:InvalidOperationException o1, 2:MetaException o2)
+
void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
+ AlterPartitionsResponse alter_partitions_req(1:AlterPartitionsRequest req)
+ throws (1:InvalidOperationException o1, 2:MetaException o2)
+
void alter_partition_with_environment_context(1:string db_name,
2:string tbl_name, 3:Partition new_part,
4:EnvironmentContext environment_context)
throws (1:InvalidOperationException o1, 2:MetaException o2)
- AlterPartitionsResponse alter_partitions_req(1:AlterPartitionsRequest req)
- throws (1:InvalidOperationException o1, 2:MetaException o2)
-
// rename the old partition to the new partition object by changing old part values to the part values
// in the new_part. old partition is identified from part_vals.
// partition keys in new_part should be the same as those in old partition.
void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
throws (1:InvalidOperationException o1, 2:MetaException o2)
+ RenamePartitionResponse rename_partition_req(1:RenamePartitionRequest req)
+ throws (1:InvalidOperationException o1, 2:MetaException o2)
+
// returns whether or not the partition name is valid based on the value of the config
// hive.metastore.partition.name.whitelist.pattern
bool partition_name_has_valid_characters(1:list<string> part_vals, 2:bool throw_exception)
@@ -1961,6 +2948,9 @@ service ThriftHiveMetastore
throws(1:MetaException o1, 2:NoSuchObjectException o2)
CheckConstraintsResponse get_check_constraints(1:CheckConstraintsRequest request)
throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ // All table constraints
+ AllTableConstraintsResponse get_all_table_constraints(1:AllTableConstraintsRequest request)
+ throws(1:MetaException o1, 2:NoSuchObjectException o2)
// column statistics interfaces
@@ -1978,6 +2968,9 @@ service ThriftHiveMetastore
SetPartitionsStatsResponse update_partition_column_statistics_req(1:SetPartitionsStatsRequest req) throws (1:NoSuchObjectException o1,
2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+ void update_transaction_statistics(1:UpdateTransactionalStatsRequest req) throws (1:MetaException o1)
+
+
// get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
// such statistics exists. If the required statistics doesn't exist, get APIs throw NoSuchObjectException
// For instance, if get_table_column_statistics is called on a partitioned table for which only
@@ -2000,10 +2993,10 @@ service ThriftHiveMetastore
// delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name]
// and col_name. If the delete API doesn't find the statistics record in the metastore, throws NoSuchObjectException
// Delete API validates the input and if the input is invalid throws InvalidInputException/InvalidObjectException.
- bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
+ bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name, 5:string engine) throws
(1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
4:InvalidInputException o4)
- bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+ bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name, 4:string engine) throws
(1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
4:InvalidInputException o4)
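
Both delete calls gain a trailing engine argument: statistics are now stored per engine, so the caller must say whose statistics to drop. A sketch, where the database, table, column, and partition names are placeholders and "hive" is the engine value the Hive CLI itself records:

    import io.trino.hive.thrift.metastore.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    class StatsCleanup {
        static void drop(ThriftHiveMetastore.Client client) throws TException {
            // New 4th argument: which engine's table statistics to delete.
            client.delete_table_column_statistics("web_logs", "clicks", "user_id", "hive");
            // New 5th argument: same, scoped to a single partition.
            client.delete_partition_column_statistics("web_logs", "clicks", "ds=2024-01-01", "user_id", "hive");
        }
    }
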
@@ -2110,6 +3103,7 @@ service ThriftHiveMetastore
// Transaction and lock management calls
// Get just list of open transactions
+ // Deprecated, use get_open_txns_req
GetOpenTxnsResponse get_open_txns()
// Get list of open transactions with state (open, aborted)
GetOpenTxnsInfoResponse get_open_txns_info()
@@ -2117,11 +3111,18 @@ service ThriftHiveMetastore
void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1)
void abort_txns(1:AbortTxnsRequest rqst) throws (1:NoSuchTxnException o1)
void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+ i64 get_latest_txnid_in_conflict(1:i64 txnId) throws (1:MetaException o1)
void repl_tbl_writeid_state(1: ReplTblWriteIdStateRequest rqst)
GetValidWriteIdsResponse get_valid_write_ids(1:GetValidWriteIdsRequest rqst)
throws (1:NoSuchTxnException o1, 2:MetaException o2)
+ void add_write_ids_to_min_history(1:i64 txnId, 2: map<string, i64> writeIds) throws (1:MetaException o2)
AllocateTableWriteIdsResponse allocate_table_write_ids(1:AllocateTableWriteIdsRequest rqst)
throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:MetaException o3)
+ MaxAllocatedTableWriteIdResponse get_max_allocated_table_write_id(1:MaxAllocatedTableWriteIdRequest rqst)
+ throws (1:MetaException o1)
+ void seed_write_id(1:SeedTableWriteIdsRequest rqst)
+ throws (1:MetaException o1)
+ void seed_txn_id(1:SeedTxnIdRequest rqst) throws (1:MetaException o1)
LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
LockResponse check_lock(1:CheckLockRequest rqst)
throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3)
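
get_latest_txnid_in_conflict gives a failed committer something actionable: the id of the transaction it collided with. A sketch of the intended flow, inferred from the signatures above (the conflict semantics are an assumption based on the method name; the CommitTxnRequest constructor over the required txnid field follows the upstream struct):

    import io.trino.hive.thrift.metastore.CommitTxnRequest;
    import io.trino.hive.thrift.metastore.ThriftHiveMetastore;
    import org.apache.thrift.TException;

    class CommitWithDiagnostics {
        // Returns -1 on success, otherwise the id of the conflicting txn.
        static long commit(ThriftHiveMetastore.Client client, long txnId) throws TException {
            try {
                client.commit_txn(new CommitTxnRequest(txnId));
                return -1;
            } catch (TException e) {
                // Application-level failures leave the transport usable.
                return client.get_latest_txnid_in_conflict(txnId);
            }
        }
    }
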
@@ -2132,7 +3133,21 @@ service ThriftHiveMetastore
void compact(1:CompactionRequest rqst)
CompactionResponse compact2(1:CompactionRequest rqst)
ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
+ bool submit_for_cleanup(1:CompactionRequest o1, 2:i64 o2, 3:i64 o3) throws (1:MetaException o1)
void add_dynamic_partitions(1:AddDynamicPartitions rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+ // Deprecated, use find_next_compact2()
+ OptionalCompactionInfoStruct find_next_compact(1: string workerId) throws(1:MetaException o1)
+ OptionalCompactionInfoStruct find_next_compact2(1: FindNextCompactRequest rqst) throws(1:MetaException o1)
+ void update_compactor_state(1: CompactionInfoStruct cr, 2: i64 txn_id)
+ list<string> find_columns_with_stats(1: CompactionInfoStruct cr)
+ void mark_cleaned(1:CompactionInfoStruct cr) throws(1:MetaException o1)
+ void mark_compacted(1: CompactionInfoStruct cr) throws(1:MetaException o1)
+ void mark_failed(1: CompactionInfoStruct cr) throws(1:MetaException o1)
+ void mark_refused(1: CompactionInfoStruct cr) throws(1:MetaException o1)
+ bool update_compaction_metrics_data(1: CompactionMetricsDataStruct data) throws(1:MetaException o1)
+ void remove_compaction_metrics_data(1: CompactionMetricsDataRequest request) throws(1:MetaException o1)
+ void set_hadoop_jobid(1: string jobId, 2: i64 cq_id)
+ GetLatestCommittedCompactionInfoResponse get_latest_committed_compaction_info(1:GetLatestCommittedCompactionInfoRequest rqst)
// Notification logging calls
NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst)
@@ -2140,6 +3155,8 @@ service ThriftHiveMetastore
NotificationEventsCountResponse get_notification_events_count(1:NotificationEventsCountRequest rqst)
FireEventResponse fire_listener_event(1:FireEventRequest rqst)
void flushCache()
+ WriteNotificationLogResponse add_write_notification_log(1:WriteNotificationLogRequest rqst)
+ WriteNotificationLogBatchResponse add_write_notification_log_in_batch(1:WriteNotificationLogBatchRequest rqst)
// Repl Change Management api
CmRecycleResponse cm_recycle(1:CmRecycleRequest request) throws(1:MetaException o1)
@@ -2241,6 +3258,29 @@ service ThriftHiveMetastore
void add_runtime_stats(1: RuntimeStat stat) throws(1:MetaException o1)
list<RuntimeStat> get_runtime_stats(1: GetRuntimeStatsRequest rqst) throws(1:MetaException o1)
+
+ // get_partitions with filter and projectspec
+ GetPartitionsResponse get_partitions_with_specs(1: GetPartitionsRequest request) throws(1:MetaException o1)
+
+ ScheduledQueryPollResponse scheduled_query_poll(1: ScheduledQueryPollRequest request) throws(1:MetaException o1)
+ void scheduled_query_maintenance(1: ScheduledQueryMaintenanceRequest request) throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:AlreadyExistsException o3, 4:InvalidInputException o4)
+ void scheduled_query_progress(1: ScheduledQueryProgressInfo info) throws(1:MetaException o1, 2: InvalidOperationException o2)
+ ScheduledQuery get_scheduled_query(1: ScheduledQueryKey scheduleKey) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+ void add_replication_metrics(1: ReplicationMetricList replicationMetricList) throws(1:MetaException o1)
+ ReplicationMetricList get_replication_metrics(1: GetReplicationMetricsRequest rqst) throws(1:MetaException o1)
+ GetOpenTxnsResponse get_open_txns_req(1: GetOpenTxnsRequest getOpenTxnsRequest)
+
+ void create_stored_procedure(1: StoredProcedure proc) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ StoredProcedure get_stored_procedure(1: StoredProcedureRequest request) throws (1:MetaException o1, 2:NoSuchObjectException o2)
+ void drop_stored_procedure(1: StoredProcedureRequest request) throws (1:MetaException o1)
+ list<string> get_all_stored_procedures(1: ListStoredProcedureRequest request) throws (1:MetaException o1)
+
+ Package find_package(1: GetPackageRequest request) throws (1:MetaException o1, 2:NoSuchObjectException o2)
+ void add_package(1: AddPackageRequest request) throws (1:MetaException o1)
+ list<string> get_all_packages(1: ListPackageRequest request) throws (1:MetaException o1)
+ void drop_package(1: DropPackageRequest request) throws (1:MetaException o1)
+ list<WriteEventInfo> get_all_write_event_info(1: GetAllWriteEventInfoRequest request) throws (1:MetaException o1)
}
// * Note about the DDL_TIME: When creating or altering a table or a partition,
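
For completeness, wiring any of the sketches above to a live metastore takes only stock libthrift plumbing; 9083 is the conventional metastore port, and real deployments usually layer SASL/Kerberos on top of the bare socket:

    import io.trino.hive.thrift.metastore.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    class MetastoreSmokeTest {
        public static void main(String[] args) throws Exception {
            TTransport transport = new TSocket("localhost", 9083);
            transport.open();
            try {
                ThriftHiveMetastore.Client client =
                        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
                client.get_all_databases().forEach(System.out::println);
            } finally {
                transport.close();
            }
        }
    }
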
@@ -2267,6 +3307,7 @@ const string META_TABLE_COLUMN_TYPES = "columns.types",
const string BUCKET_FIELD_NAME = "bucket_field_name",
const string BUCKET_COUNT = "bucket_count",
const string FIELD_TO_DIMENSION = "field_to_dimension",
+const string IF_PURGE = "ifPurge",
const string META_TABLE_NAME = "name",
const string META_TABLE_DB = "db",
const string META_TABLE_LOCATION = "location",
@@ -2277,7 +3318,22 @@ const string FILE_INPUT_FORMAT = "file.inputformat",
const string FILE_OUTPUT_FORMAT = "file.outputformat",
const string META_TABLE_STORAGE = "storage_handler",
const string TABLE_IS_TRANSACTIONAL = "transactional",
-const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction",
+const string NO_AUTO_COMPACT = "no_auto_compaction",
const string TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties",
const string TABLE_BUCKETING_VERSION = "bucketing_version",
-
+const string DRUID_CONFIG_PREFIX = "druid.",
+const string JDBC_CONFIG_PREFIX = "hive.sql.",
+const string TABLE_IS_CTAS = "created_with_ctas",
+const string TABLE_IS_CTLT = "created_with_ctlt",
+const string PARTITION_TRANSFORM_SPEC = "partition_transform_spec",
+const string NO_CLEANUP = "no_cleanup",
+const string CTAS_LEGACY_CONFIG = "create_table_as_external",
+const string DEFAULT_TABLE_TYPE = "defaultTableType",
+
+// ACID
+const string TXN_ID = "txnId",
+const string WRITE_ID = "writeId",
+
+// Keys for alter table environment context parameters
+const string EXPECTED_PARAMETER_KEY = "expected_parameter_key",
+const string EXPECTED_PARAMETER_VALUE = "expected_parameter_value",
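
Several of the new constants are keys the server reads from an EnvironmentContext rather than standalone values; IF_PURGE is the clearest example. A sketch of a purge-on-drop call, assuming the generated constants land in the usual hive_metastoreConstants class and that the server treats "TRUE" as the enabling value, as upstream Hive does:

    import java.util.HashMap;
    import java.util.Map;
    import io.trino.hive.thrift.metastore.EnvironmentContext;
    import io.trino.hive.thrift.metastore.ThriftHiveMetastore;
    import io.trino.hive.thrift.metastore.hive_metastoreConstants;
    import org.apache.thrift.TException;

    class PurgeDrop {
        // Drop a table and bypass the trash directory via IF_PURGE.
        static void drop(ThriftHiveMetastore.Client client, String db, String table) throws TException {
            Map<String, String> props = new HashMap<>();
            props.put(hive_metastoreConstants.IF_PURGE, "TRUE"); // key is "ifPurge"
            client.drop_table_with_environment_context(db, table, /* deleteData */ true,
                    new EnvironmentContext(props));
        }
    }
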