diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index d651195..e13583b 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -36,16 +36,31 @@ struct Version {
 }
 
 struct FieldSchema {
-  1: string name, // name of the field
-  2: string type, // type of the field. primitive types defined above, specify list, map for lists & maps
+  /** name of the field */
+  1: string name,
+
+  /**
+   * Type of the field.
+   * Primitive types defined above, specify list, map
+   * for lists & maps.
+   */
+  2: string type,
+
   3: string comment
 }
 
 struct Type {
-  1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types
-  2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
-  3: optional string type2, // val type if the name is 'map' (MAP_TYPE)
-  4: optional list fields // if the name is one of the user defined types
+  /** One of the types in PrimitiveTypes or CollectionTypes or User defined types. */
+  1: string name,
+
+  /** Object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE). */
+  2: optional string type1,
+
+  /** Value type if the name is 'map' (MAP_TYPE). */
+  3: optional string type2,
+
+  /** If the name is one of the user defined types. */
+  4: optional list fields
 }
 
 enum HiveObjectType {
@@ -70,40 +85,52 @@ enum PartitionEventType {
   LOAD_DONE = 1,
 }
 
-// Enums for transaction and lock management 
+// Enums for transaction and lock management
 enum TxnState {
-    COMMITTED = 1,
-    ABORTED = 2,
-    OPEN = 3,
+  COMMITTED = 1,
+  ABORTED = 2,
+  OPEN = 3,
 }
 
 enum LockLevel {
-    DB = 1,
-    TABLE = 2,
-    PARTITION = 3,
+  DB = 1,
+  TABLE = 2,
+  PARTITION = 3,
 }
 
 enum LockState {
-    ACQUIRED = 1, // requester has the lock
-    WAITING = 2,  // requester is waiting for the lock and should call checklock at a later point to see if the lock has been obtained.
-    ABORT = 3,    // the lock has been aborted, most likely due to timeout
-    NOT_ACQUIRED = 4, // returned only with lockNoWait, indicates the lock was not available and was not acquired
+  /** Requester has the lock. */
+  ACQUIRED = 1,
+
+  /**
+   * Requester is waiting for the lock and should call checkLock at a later point to see if the
+   * lock has been obtained.
+   */
+  WAITING = 2,
+
+  /**
+   * The lock has been aborted, most likely due to timeout.
+   */
+  ABORT = 3,
+
+  /** Returned only with lockNoWait, indicates the lock was not available and was not acquired. */
+  NOT_ACQUIRED = 4,
 }
 
 enum LockType {
-    SHARED_READ = 1,
-    SHARED_WRITE = 2,
-    EXCLUSIVE = 3,
+  SHARED_READ = 1,
+  SHARED_WRITE = 2,
+  EXCLUSIVE = 3,
 }
 
 enum CompactionType {
-    MINOR = 1,
-    MAJOR = 2,
+  MINOR = 1,
+  MAJOR = 2,
 }
 
 enum GrantRevokeType {
-    GRANT = 1,
-    REVOKE = 2,
+  GRANT = 1,
+  REVOKE = 2,
 }
 
 // Types of events the client can request that the metastore fire.  For now just support DML operations, as the metastore knows
@@ -143,15 +170,22 @@ struct PrivilegeBag {
 }
 
 struct PrincipalPrivilegeSet {
-  1: map> userPrivileges, // user name -> privilege grant info
-  2: map> groupPrivileges, // group name -> privilege grant info
-  3: map> rolePrivileges, //role name -> privilege grant info
+  /** User name -> privilege grant info. */
+  1: map> userPrivileges,
+
+  /** Group name -> privilege grant info. */
+  2: map> groupPrivileges,
+
+  /** Role name -> privilege grant info. */
+  3: map> rolePrivileges,
 }
 
 struct GrantRevokePrivilegeRequest {
   1: GrantRevokeType requestType;
   2: PrivilegeBag privileges;
-  3: optional bool revokeGrantOption; // Only for revoke request
+
+  /** Only for revoke request. */
+  3: optional bool revokeGrantOption;
 }
 
 struct GrantRevokePrivilegeResponse {
@@ -164,7 +198,7 @@ struct Role {
   3: string ownerName,
 }
 
-// Representation of a grant for a principal to a role
+/** Representation of a grant for a principal to a role. */
 struct RolePrincipalGrant {
   1: string roleName,
   2: string principalName,
@@ -197,8 +231,12 @@ struct GrantRevokeRoleRequest {
   2: string roleName;
   3: string principalName;
   4: PrincipalType principalType;
-  5: optional string grantor; // Needed for grant
-  6: optional PrincipalType grantorType; // Needed for grant
+
+  /** Needed for grant. */
+  5: optional string grantor;
+
+  /** Needed for grant. */
+  6: optional PrincipalType grantorType;
   7: optional bool grantOption;
 }
 
@@ -206,84 +244,150 @@ struct GrantRevokeRoleResponse {
   1: optional bool success;
 }
 
-// namespace for tables
+/** Namespace for tables. */
 struct Database {
   1: string name,
   2: string description,
   3: string locationUri,
-  4: map parameters, // properties associated with the database
+
+  /** Properties associated with the database. */
+  4: map parameters,
   5: optional PrincipalPrivilegeSet privileges,
   6: optional string ownerName,
   7: optional PrincipalType ownerType
 }
 
-// This object holds the information needed by SerDes
+/** This object holds the information needed by SerDes. */
 struct SerDeInfo {
-  1: string name, // name of the serde, table name by default
-  2: string serializationLib, // usually the class that implements the extractor & loader
-  3: map parameters // initialization parameters
+  /** Name of the serde, table name by default. */
+  1: string name,
+
+  /** Usually the class that implements the extractor & loader. */
+  2: string serializationLib,
+
+  /** Initialization parameters. */
+  3: map parameters
 }
 
-// sort order of a column (column name along with asc(1)/desc(0))
+/** Sort order of a column (column name along with asc(1)/desc(0)). */
 struct Order {
-  1: string col, // sort column name
-  2: i32 order // asc(1) or desc(0)
+  /** Sort column name. */
+  1: string col,
+
+  /** asc(1) or desc(0). */
+  2: i32 order
 }
 
-// this object holds all the information about skewed table
+/** This object holds all the information about a skewed table. */
 struct SkewedInfo {
-  1: list skewedColNames, // skewed column names
-  2: list> skewedColValues, //skewed values
-  3: map, string> skewedColValueLocationMaps, //skewed value to location mappings
+  /** Skewed column names. */
+  1: list skewedColNames,
+
+  /** Skewed values. */
+  2: list> skewedColValues,
+
+  /** Skewed value to location mappings. */
+  3: map, string> skewedColValueLocationMaps,
 }
 
-// this object holds all the information about physical storage of the data belonging to a table
+/**
+ * This object holds all the information about the physical storage
+ * of the data belonging to a table.
+ */ struct StorageDescriptor { - 1: list cols, // required (refer to types defined above) - 2: string location, // defaults to //tablename - 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format - 4: string outputFormat, // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format - 5: bool compressed, // compressed or not - 6: i32 numBuckets, // this must be specified if there are any dimension columns - 7: SerDeInfo serdeInfo, // serialization and deserialization information - 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` - 9: list sortCols, // sort order of the data in each bucket - 10: map parameters, // any user supplied key value hash - 11: optional SkewedInfo skewedInfo, // skewed information - 12: optional bool storedAsSubDirectories // stored as subdirectories or not -} - -// table information + /** Required (refer to types defined above). */ + 1: list cols, + + /** Defaults to //tablename. */ + 2: string location, + + /** SequenceFileInputFormat (binary), TextInputFormat or custom format. */ + 3: string inputFormat, + + /** SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format. */ + 4: string outputFormat, + + /** Compressed or not. */ + 5: bool compressed, + + /** This must be specified if there are any dimension columns. */ + 6: i32 numBuckets, + + /** Serialization and deserialization information. */ + 7: SerDeInfo serdeInfo, + + /** Reducer grouping columns, clustering columns and bucketing columns. */ + 8: list bucketCols, + + /** Sort order of the data in each bucket. */ + 9: list sortCols, + + /** Any user supplied key value hash. */ + 10: map parameters, + + /** Skewed information. */ + 11: optional SkewedInfo skewedInfo, + + /** Stored as subdirectories or not. */ + 12: optional bool storedAsSubDirectories +} + +/** Table information. */ struct Table { - 1: string tableName, // name of the table - 2: string dbName, // database name ('default') - 3: string owner, // owner of this table - 4: i32 createTime, // creation time of the table - 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on) - 6: i32 retention, // retention time - 7: StorageDescriptor sd, // storage descriptor of the table - 8: list partitionKeys, // partition keys of the table. only primitive types are supported - 9: map parameters, // to store comments or any other user level parameters - 10: string viewOriginalText, // original view text, null for non-view - 11: string viewExpandedText, // expanded view text, null for non-view - 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE + /** Name of the table. */ + 1: string tableName, + + /** Database name ('default'). */ + 2: string dbName, + + /** Owner of this table. */ + 3: string owner, + + /** Creation time of the table. */ + 4: i32 createTime, + + /** Last access time (usually this will be filled from HDFS and shouldn't be relied on). */ + 5: i32 lastAccessTime, + + /** Retention time. */ + 6: i32 retention, + + /** Storage descriptor of the table. */ + 7: StorageDescriptor sd, + + /** Partition keys of the table. only primitive types are supported. */ + 8: list partitionKeys, + + /** To store comments or any other user level parameters. */ + 9: map parameters, + + /** Original view text, null for non-view. */ + 10: string viewOriginalText, + + /** Expanded view text, null for non-view. */ + 11: string viewExpandedText, + + /** Table type enum, e.g. 
 
 struct Partition {
-  1: list values // string value is converted to appropriate partition key type
-  2: string dbName,
-  3: string tableName,
-  4: i32 createTime,
-  5: i32 lastAccessTime,
-  6: StorageDescriptor sd,
+  /** String value is converted to appropriate partition key type. */
+  1: list values,
+  2: string dbName,
+  3: string tableName,
+  4: i32 createTime,
+  5: i32 lastAccessTime,
+  6: StorageDescriptor sd,
   7: map parameters,
   8: optional PrincipalPrivilegeSet privileges
 }
 
 struct PartitionWithoutSD {
-  1: list values // string value is converted to appropriate partition key type
+  /** String value is converted to appropriate partition key type. */
+  1: list values,
   2: i32 createTime,
   3: i32 lastAccessTime,
   4: string relativePath,
@@ -309,8 +413,11 @@ struct PartitionSpec {
 }
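Continuing the sketch above: a Partition mostly mirrors its table's StorageDescriptor, differing only in values and location. The deep-copy constructor is generated by Thrift, and the `ds=...` path layout is shown as an assumption about the usual convention.

```java
// Sketch: one value per partition key, aligned positionally with the
// table's partitionKeys.
static Partition makePartition(StorageDescriptor tableSd) {
  Partition p = new Partition();
  p.setDbName("default");
  p.setTableName("web_logs");
  p.setValues(Arrays.asList("2016-01-01"));
  StorageDescriptor sd = new StorageDescriptor(tableSd);  // generated deep copy
  sd.setLocation(tableSd.getLocation() + "/ds=2016-01-01");  // hypothetical layout
  p.setSd(sd);
  return p;
}
```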
 
 struct Index {
-  1: string indexName, // unique with in the whole database namespace
-  2: string indexHandlerClass, // reserved
+  /** Unique within the whole database namespace. */
+  1: string indexName,
+
+  /** Reserved. */
+  2: string indexHandlerClass,
   3: string dbName,
   4: string origTableName,
   5: i32 createTime,
@@ -321,121 +428,125 @@
   10: bool deferredRebuild
 }
 
-// column statistics
+// Column statistics
 struct BooleanColumnStatsData {
-1: required i64 numTrues,
-2: required i64 numFalses,
-3: required i64 numNulls
+  1: required i64 numTrues,
+  2: required i64 numFalses,
+  3: required i64 numNulls
 }
 
 struct DoubleColumnStatsData {
-1: optional double lowValue,
-2: optional double highValue,
-3: required i64 numNulls,
-4: required i64 numDVs
+  1: optional double lowValue,
+  2: optional double highValue,
+  3: required i64 numNulls,
+  4: required i64 numDVs
 }
 
 struct LongColumnStatsData {
-1: optional i64 lowValue,
-2: optional i64 highValue,
-3: required i64 numNulls,
-4: required i64 numDVs
+  1: optional i64 lowValue,
+  2: optional i64 highValue,
+  3: required i64 numNulls,
+  4: required i64 numDVs
 }
 
 struct StringColumnStatsData {
-1: required i64 maxColLen,
-2: required double avgColLen,
-3: required i64 numNulls,
-4: required i64 numDVs
+  1: required i64 maxColLen,
+  2: required double avgColLen,
+  3: required i64 numNulls,
+  4: required i64 numDVs
 }
 
 struct BinaryColumnStatsData {
-1: required i64 maxColLen,
-2: required double avgColLen,
-3: required i64 numNulls
+  1: required i64 maxColLen,
+  2: required double avgColLen,
+  3: required i64 numNulls
 }
 
 struct Decimal {
-1: required binary unscaled,
-3: required i16 scale
+  1: required binary unscaled,
+  3: required i16 scale
 }
 
 struct DecimalColumnStatsData {
-1: optional Decimal lowValue,
-2: optional Decimal highValue,
-3: required i64 numNulls,
-4: required i64 numDVs
+  1: optional Decimal lowValue,
+  2: optional Decimal highValue,
+  3: required i64 numNulls,
+  4: required i64 numDVs
 }
 
 struct Date {
-1: required i64 daysSinceEpoch
+  1: required i64 daysSinceEpoch
 }
 
 struct DateColumnStatsData {
-1: optional Date lowValue,
-2: optional Date highValue,
-3: required i64 numNulls,
-4: required i64 numDVs
+  1: optional Date lowValue,
+  2: optional Date highValue,
+  3: required i64 numNulls,
+  4: required i64 numDVs
 }
 
 union ColumnStatisticsData {
-1: BooleanColumnStatsData booleanStats,
-2: LongColumnStatsData longStats,
-3: DoubleColumnStatsData doubleStats,
-4: StringColumnStatsData stringStats,
-5: BinaryColumnStatsData binaryStats,
-6: DecimalColumnStatsData decimalStats,
-7: DateColumnStatsData dateStats
+  1: BooleanColumnStatsData booleanStats,
+  2: LongColumnStatsData longStats,
+  3: DoubleColumnStatsData doubleStats,
+  4: StringColumnStatsData stringStats,
+  5: BinaryColumnStatsData binaryStats,
+  6: DecimalColumnStatsData decimalStats,
+  7: DateColumnStatsData dateStats
 }
 
 struct ColumnStatisticsObj {
-1: required string colName,
-2: required string colType,
-3: required ColumnStatisticsData statsData
+  1: required string colName,
+  2: required string colType,
+  3: required ColumnStatisticsData statsData
 }
 
 struct ColumnStatisticsDesc {
-1: required bool isTblLevel,
-2: required string dbName,
-3: required string tableName,
-4: optional string partName,
-5: optional i64 lastAnalyzed
+  1: required bool isTblLevel,
+  2: required string dbName,
+  3: required string tableName,
+  4: optional string partName,
+  5: optional i64 lastAnalyzed
 }
 
 struct ColumnStatistics {
-1: required ColumnStatisticsDesc statsDesc,
-2: required list statsObj;
+  1: required ColumnStatisticsDesc statsDesc,
+  2: required list statsObj;
 }
 
 struct AggrStats {
-1: required list colStats,
-2: required i64 partsFound // number of partitions for which stats were found
+  1: required list colStats,
+
+  /** Number of partitions for which stats were found. */
+  2: required i64 partsFound
 }
 
 struct SetPartitionsStatsRequest {
-1: required list colStats
+  1: required list colStats
 }
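As a hedged illustration of how the stats union above gets populated: Thrift-generated Java unions expose static factory methods named after their fields (e.g. `longStats`), so reporting statistics for a bigint column might look like the following. The class and constructor shapes come from the assumed generated bindings, and the final update call is one of the statistics APIs declared later in the service.

```java
// Sketch: table-level statistics for one bigint column.
LongColumnStatsData lcs = new LongColumnStatsData();
lcs.setLowValue(1);
lcs.setHighValue(100000);
lcs.setNumNulls(12);
lcs.setNumDVs(4200);

ColumnStatisticsObj obj = new ColumnStatisticsObj(
    "id", "bigint", ColumnStatisticsData.longStats(lcs));

// ColumnStatisticsDesc(isTblLevel, dbName, tableName): required fields only.
ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "default", "web_logs");
ColumnStatistics stats = new ColumnStatistics(desc, Arrays.asList(obj));
// client.update_table_column_statistics(stats);  // overwrites any existing stats
```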
 
-// schema of the table/query results etc.
+/** Schema of the table/query results etc. */
 struct Schema {
-  // column names, types, comments
-  1: list fieldSchemas, // delimiters etc
+  /** Column names, types, comments. */
+  1: list fieldSchemas,
+
+  /** Delimiters etc. */
   2: map properties
 }
 
-// Key-value store to be used with selected
-// Metastore APIs (create, alter methods).
-// The client can pass environment properties / configs that can be
-// accessed in hooks.
+/**
+ * Key-value store to be used with selected Metastore APIs (create, alter methods).
+ * The client can pass environment properties / configs that can be accessed in hooks.
+ */
 struct EnvironmentContext {
   1: map properties
 }
 
-// Return type for get_partitions_by_expr
+/** Return type for get_partitions_by_expr. */
 struct PartitionsByExprResult {
   1: required list partitions,
-  // Whether the results has any (currently, all) partitions which may or may not match
+
+  /** Whether the result has any (currently, all) partitions which may or may not match. */
   2: required bool hasUnknownPartitions
 }
 
@@ -468,12 +579,12 @@ struct PartitionsStatsRequest {
   4: required list partNames
 }
 
-// Return type for add_partitions_req
+/** Return type for add_partitions_req. */
 struct AddPartitionsResult {
   1: optional list partitions,
 }
 
-// Request type for add_partitions_req
+/** Request type for add_partitions_req. */
 struct AddPartitionsRequest {
   1: required string dbName,
   2: required string tblName,
@@ -482,7 +593,7 @@
   5: optional bool needResult=true
 }
 
-// Return type for drop_partitions_req
+/** Return type for drop_partitions_req. */
 struct DropPartitionsResult {
   1: optional list partitions,
 }
 
@@ -497,17 +608,19 @@ union RequestPartsSpec {
   2: list exprs;
 }
 
-// Request type for drop_partitions_req // TODO: we might want to add "bestEffort" flag; where a subset can fail
+/** Request type for drop_partitions_req. */
+// TODO: we might want to add "bestEffort" flag; where a subset can fail
 struct DropPartitionsRequest {
   1: required string dbName,
   2: required string tblName,
   3: required RequestPartsSpec parts,
   4: optional bool deleteData,
-  5: optional bool ifExists=true, // currently verified on client
+
+  /** Currently verified on client. */
+  5: optional bool ifExists = true,
   6: optional bool ignoreProtection,
   7: optional EnvironmentContext environmentContext,
-  8: optional bool needResult=true
+  8: optional bool needResult = true
 }
 
 enum FunctionType {
@@ -525,7 +638,7 @@
   2: string uri,
 }
 
-// User-defined function
+/** User-defined function. */
 struct Function {
   1: string functionName,
   2: string dbName,
   8: list resourceUris,
 }
 
-// Structs for transaction and locks
+// Structs for transaction and locks
 struct TxnInfo {
-    1: required i64 id,
-    2: required TxnState state,
-    3: required string user, // used in 'show transactions' to help admins find who has open transactions
-    4: required string hostname, // used in 'show transactions' to help admins find who has open transactions
+  1: required i64 id,
+  2: required TxnState state,
+
+  /** Used in 'show transactions' to help admins find who has open transactions. */
+  3: required string user,
+
+  /** Used in 'show transactions' to help admins find who has open transactions. */
+  4: required string hostname,
 }
 
 struct GetOpenTxnsInfoResponse {
-    1: required i64 txn_high_water_mark,
-    2: required list open_txns,
+  1: required i64 txn_high_water_mark,
+  2: required list open_txns,
 }
 
 struct GetOpenTxnsResponse {
-    1: required i64 txn_high_water_mark,
-    2: required set open_txns,
+  1: required i64 txn_high_water_mark,
+  2: required set open_txns,
 }
 
 struct OpenTxnRequest {
-    1: required i32 num_txns,
-    2: required string user,
-    3: required string hostname,
+  1: required i32 num_txns,
+  2: required string user,
+  3: required string hostname,
 }
 
 struct OpenTxnsResponse {
-    1: required list txn_ids,
+  1: required list txn_ids,
 }
 
 struct AbortTxnRequest {
-    1: required i64 txnid,
+  1: required i64 txnid,
 }
 
 struct CommitTxnRequest {
-    1: required i64 txnid,
+  1: required i64 txnid,
 }
 
 struct LockComponent {
-    1: required LockType type,
-    2: required LockLevel level,
-    3: required string dbname,
-    4: optional string tablename,
-    5: optional string partitionname,
+  1: required LockType type,
+  2: required LockLevel level,
+  3: required string dbname,
+  4: optional string tablename,
+  5: optional string partitionname,
 }
 
 struct LockRequest {
-    1: required list component,
-    2: optional i64 txnid,
-    3: required string user, // used in 'show locks' to help admins find who has open locks
-    4: required string hostname, // used in 'show locks' to help admins find who has open locks
+  1: required list component,
+  2: optional i64 txnid,
+
+  /** Used in 'show locks' to help admins find who has open locks. */
+  3: required string user,
+
+  /** Used in 'show locks' to help admins find who has open locks. */
+  4: required string hostname,
 }
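A sketch of how a client might drive this lock machinery, following the LockState semantics defined earlier (ACQUIRED / WAITING / ABORT): the constructor shapes and the `lock` / `check_lock` service methods are taken from the assumed generated Java bindings.

```java
// Sketch: take a shared read lock on a table, polling while WAITING.
static void withReadLock(ThriftHiveMetastore.Client client) throws Exception {
  // LockComponent(type, level, dbname): required fields.
  LockComponent comp = new LockComponent(
      LockType.SHARED_READ, LockLevel.TABLE, "default");
  comp.setTablename("web_logs");

  // LockRequest(component, user, hostname): user/hostname feed 'show locks'.
  LockRequest req = new LockRequest(
      Arrays.asList(comp), "etl",
      java.net.InetAddress.getLocalHost().getHostName());

  LockResponse resp = client.lock(req);
  while (resp.getState() == LockState.WAITING) {
    Thread.sleep(1000);  // re-check later, as the WAITING doc suggests
    resp = client.check_lock(new CheckLockRequest(resp.getLockid()));
  }
  if (resp.getState() != LockState.ACQUIRED) {
    throw new IllegalStateException("lock not acquired: " + resp.getState());
  }
}
```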
 
 struct LockResponse {
-    1: required i64 lockid,
-    2: required LockState state,
+  1: required i64 lockid,
+  2: required LockState state,
 }
 
 struct CheckLockRequest {
-    1: required i64 lockid,
+  1: required i64 lockid,
 }
 
 struct UnlockRequest {
-    1: required i64 lockid,
+  1: required i64 lockid,
 }
 
 struct ShowLocksRequest {
 }
 
 struct ShowLocksResponseElement {
-    1: required i64 lockid,
-    2: required string dbname,
-    3: optional string tablename,
-    4: optional string partname,
-    5: required LockState state,
-    6: required LockType type,
-    7: optional i64 txnid,
-    8: required i64 lastheartbeat,
-    9: optional i64 acquiredat,
-    10: required string user,
-    11: required string hostname,
+  1: required i64 lockid,
+  2: required string dbname,
+  3: optional string tablename,
+  4: optional string partname,
+  5: required LockState state,
+  6: required LockType type,
+  7: optional i64 txnid,
+  8: required i64 lastheartbeat,
+  9: optional i64 acquiredat,
+  10: required string user,
+  11: required string hostname,
 }
 
 struct ShowLocksResponse {
-    1: list locks,
+  1: list locks,
 }
 
 struct HeartbeatRequest {
-    1: optional i64 lockid,
-    2: optional i64 txnid
+  1: optional i64 lockid,
+  2: optional i64 txnid
 }
 
 struct HeartbeatTxnRangeRequest {
-    1: required i64 min,
-    2: required i64 max
+  1: required i64 min,
+  2: required i64 max
 }
 
 struct HeartbeatTxnRangeResponse {
-    1: required set aborted,
-    2: required set nosuch
+  1: required set aborted,
+  2: required set nosuch
 }
 
 struct CompactionRequest {
-    1: required string dbname,
-    2: required string tablename,
-    3: optional string partitionname,
-    4: required CompactionType type,
-    5: optional string runas,
+  1: required string dbname,
+  2: required string tablename,
+  3: optional string partitionname,
+  4: required CompactionType type,
+  5: optional string runas,
 }
 
 struct ShowCompactRequest {
 }
 
 struct ShowCompactResponseElement {
-    1: required string dbname,
-    2: required string tablename,
-    3: optional string partitionname,
-    4: required CompactionType type,
-    5: required string state,
-    6: optional string workerid,
-    7: optional i64 start,
-    8: optional string runAs,
+  1: required string dbname,
+  2: required string tablename,
+  3: optional string partitionname,
+  4: required CompactionType type,
+  5: required string state,
+  6: optional string workerid,
+  7: optional i64 start,
+  8: optional string runAs,
 }
 
 struct ShowCompactResponse {
-    1: required list compacts,
+  1: required list compacts,
 }
 
 struct AddDynamicPartitions {
-    1: required i64 txnid,
-    2: required string dbname,
-    3: required string tablename,
-    4: required list partitionnames,
+  1: required i64 txnid,
+  2: required string dbname,
+  3: required string tablename,
+  4: required list partitionnames,
 }
 
 struct NotificationEventRequest {
-    1: required i64 lastEvent,
-    2: optional i32 maxEvents,
+  1: required i64 lastEvent,
+  2: optional i32 maxEvents,
 }
 
 struct NotificationEvent {
-    1: required i64 eventId,
-    2: required i32 eventTime,
-    3: required string eventType,
-    4: optional string dbName,
-    5: optional string tableName,
-    6: required string message,
+  1: required i64 eventId,
+  2: required i32 eventTime,
+  3: required string eventType,
+  4: optional string dbName,
+  5: optional string tableName,
+  6: required string message,
 }
 
 struct NotificationEventResponse {
-    1: required list events,
+  1: required list events,
 }
 
 struct CurrentNotificationEventId {
-    1: required i64 eventId,
+  1: required i64 eventId,
 }
 
 struct InsertEventRequestData {
-    1: required list filesAdded
+  1: required list filesAdded
 }
 
 union FireEventRequestData {
-    1: InsertEventRequestData insertData
+  1: InsertEventRequestData insertData
 }
 
 struct FireEventRequest {
-    1: required bool successful,
-    2: required FireEventRequestData data
-    // dbname, tablename, and partition vals are included as optional in the top level event rather than placed in each type of
-    // subevent as I assume they'll be used across most event types.
-    3: optional string dbName,
-    4: optional string tableName,
-    5: optional list partitionVals,
+  1: required bool successful,
+  2: required FireEventRequestData data
+  // dbname, tablename, and partition vals are included as optional in the top level event rather
+  // than placed in each type of subevent as I assume they'll be used across most event types.
+  3: optional string dbName,
+  4: optional string tableName,
+  5: optional list partitionVals,
 }
 
+/** No-op for now; this is just a placeholder for future responses. */
 struct FireEventResponse {
-    // NOP for now, this is just a place holder for future responses
+
 }
 
@@ -767,134 +889,160 @@ exception InvalidInputException {
   1: string message
 }
 
-// Transaction and lock exceptions
+// Transaction and lock exceptions
 exception NoSuchTxnException {
-    1: string message
+  1: string message
 }
 
 exception TxnAbortedException {
-    1: string message
+  1: string message
 }
 
 exception TxnOpenException {
-    1: string message
+  1: string message
 }
 
 exception NoSuchLockException {
-    1: string message
+  1: string message
 }
 
 /**
 * This interface is live.
 */
-service ThriftHiveMetastore extends fb303.FacebookService
-{
+service ThriftHiveMetastore extends fb303.FacebookService {
   string getMetaConf(1:string key) throws(1:MetaException o1)
   void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)
 
-  void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+  void create_database(1:Database database)
+      throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
   Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
-  void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+  void drop_database(1:string name, 2:bool deleteData, 3:bool cascade)
+      throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
   list get_databases(1:string pattern) throws(1:MetaException o1)
   list get_all_databases() throws(1:MetaException o1)
-  void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+  void alter_database(1:string dbname, 2:Database db)
+      throws(1:MetaException o1, 2:NoSuchObjectException o2)
 
-  // returns the type with given name (make seperate calls for the dependent types if needed)
+  /** Returns the type with given name (make separate calls for the dependent types if needed). */
  Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2)
-  bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+  bool create_type(1:Type type)
+      throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
   bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
-  map get_type_all(1:string name)
-                                throws(1:MetaException o2)
-
-  // Gets a list of FieldSchemas describing the columns of a particular table
-  list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
-  list get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
-
-  // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
-  list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
-  list get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
-
-  // create a Hive table. Following fields must be set
-  // tableName
-  // database        (only 'default' for now until Hive QL supports databases)
-  // owner           (not needed, but good to have for tracking purposes)
-  // sd.cols         (list of field schemas)
-  // sd.inputFormat  (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat)
-  // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat)
-  // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe
-  // * See notes on DDL_TIME
-  void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
-  void create_table_with_environment_context(1:Table tbl,
-      2:EnvironmentContext environment_context)
-      throws (1:AlreadyExistsException o1,
-      2:InvalidObjectException o2, 3:MetaException o3,
-      4:NoSuchObjectException o4)
-  // drops the table and all the partitions associated with it if the table has partitions
-  // delete data (including partitions) if deleteData is set to true
+  map get_type_all(1:string name) throws(1:MetaException o2)
+
+  /** Gets a list of FieldSchemas describing the columns of a particular table. */
+  list get_fields(1: string db_name, 2: string table_name)
+      throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
+  list get_fields_with_environment_context(1: string db_name, 2: string table_name,
+      3:EnvironmentContext environment_context)
+      throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+
+  /**
+   * Gets a list of FieldSchemas describing both the columns and the partition keys of a
+   * particular table.
+   */
+  list get_schema(1: string db_name, 2: string table_name)
+      throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+  list get_schema_with_environment_context(1: string db_name, 2: string table_name,
+      3:EnvironmentContext environment_context)
+      throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+
+
+  /**
+   * Create a Hive table. Following fields must be set:
+   *   tableName
+   *   database        (only 'default' for now until Hive QL supports databases)
+   *   owner           (not needed, but good to have for tracking purposes)
+   *   sd.cols         (list of field schemas)
+   *   sd.inputFormat  (SequenceFileInputFormat (binary like falcon tables or u_full)
+   *                   or TextInputFormat)
+   *   sd.outputFormat (SequenceFileOutputFormat (binary) or TextInputFormat)
+   *   sd.serdeInfo.serializationLib (SerDe class name,
+   *                   e.g. org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe)
+   * See notes on DDL_TIME.
+   */
+  void create_table(1:Table tbl)
+      throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3,
+      4:NoSuchObjectException o4)
+  void create_table_with_environment_context(1:Table tbl, 2:EnvironmentContext environment_context)
+      throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3,
+      4:NoSuchObjectException o4)
+
+  /**
+   * Drops the table and all the partitions associated with it if the table has partitions;
+   * deletes data (including partitions) if deleteData is set to true.
+   */
   void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
-                       throws(1:NoSuchObjectException o1, 2:MetaException o3)
+      throws(1:NoSuchObjectException o1, 2:MetaException o3)
   void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
       4:EnvironmentContext environment_context)
-                       throws(1:NoSuchObjectException o1, 2:MetaException o3)
+      throws(1:NoSuchObjectException o1, 2:MetaException o3)
   list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
   list get_all_tables(1: string db_name) throws (1: MetaException o1)
 
   Table get_table(1:string dbname, 2:string tbl_name)
                        throws (1:MetaException o1, 2:NoSuchObjectException o2)
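A minimal sketch of talking to this service over a raw Thrift socket and exercising create_table/get_table. The host, port, and `makeTable()` helper come from the earlier sketch and are assumptions; real deployments typically go through a higher-level client such as HiveMetaStoreClient.

```java
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ClientExample {
  public static void main(String[] args) throws Exception {
    TTransport transport = new TSocket("metastore-host", 9083);  // placeholders
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));

    client.create_table(TableExample.makeTable());  // from the earlier sketch
    Table fetched = client.get_table("default", "web_logs");
    System.out.println("owner: " + fetched.getOwner());
    transport.close();
  }
}
```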
   list get_table_objects_by_name(1:string dbname, 2:list tbl_names)
-                       throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
-
-  // Get a list of table names that match a filter.
-  // The filter operators are LIKE, <, <=, >, >=, =, <>
-  //
-  // In the filter statement, values interpreted as strings must be enclosed in quotes,
-  // while values interpreted as integers should not be.  Strings and integers are the only
-  // supported value types.
-  //
-  // The currently supported key names in the filter are:
-  // Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
-  //   and supports all filter operators
-  // Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
-  //   and supports all filter operators except LIKE
-  // Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
-  //   and only supports the filter operators = and <>.
-  //   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
-  //   For example, to filter on parameter keys called "retention", the key name in the filter
-  //   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
-  //   Also, = and <> only work for keys that exist
-  //   in the tables. E.g., if you are looking for tables where key1 <> value, it will only
-  //   look at tables that have a value for the parameter key1.
-  // Some example filter statements include:
-  // filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
-  //   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
-  // filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
-  //   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
-  // @param dbName
-  //          The name of the database from which you will retrieve the table names
-  // @param filterType
-  //          The type of filter
-  // @param filter
-  //          The filter string
-  // @param max_tables
-  //          The maximum number of tables returned
-  // @return  A list of table names that match the desired filter
+      throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+
+  /**
+   * Get a list of table names that match a filter.
+   * The filter operators are LIKE, <, <=, >, >=, =, <>.
+   *
+   * In the filter statement, values interpreted as strings must be enclosed in quotes,
+   * while values interpreted as integers should not be. Strings and integers are the only
+   * supported value types.
+   *
+   * The currently supported key names in the filter are:
+   * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+   *   and supports all filter operators.
+   * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+   *   and supports all filter operators except LIKE.
+   * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+   *   and only supports the filter operators = and <>.
+   *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+   *   For example, to filter on parameter keys called "retention", the key name in the filter
+   *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+   *   Also, = and <> only work for keys that exist
+   *   in the tables. E.g., if you are looking for tables where key1 <> value, it will only
+   *   look at tables that have a value for the parameter key1.
+   * Some example filter statements include:
+   * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+   *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+   * filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+   *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
+   * @param dbName
+   *          The name of the database from which you will retrieve the table names
+   * @param filterType
+   *          The type of filter
+   * @param filter
+   *          The filter string
+   * @param max_tables
+   *          The maximum number of tables returned
+   * @return A list of table names that match the desired filter
+   */
   list get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1)
-                       throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+      throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
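To make the filter grammar concrete, here is a hedged sketch of calling this method from Java. It assumes the filter-key constants are exposed via a generated constants class (the class name varies across Hive versions; `hive_metastoreConstants` is used here as an assumption).

```java
// Sketch: tables owned by anyone matching ".*test.*" with retention = "30".
String filter =
    hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
    hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\"";

// max_tables is an i16; -1 would mean "no limit".
List<String> names = client.get_table_names_by_filter("default", filter, (short) 100);
```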
+ */ void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) - throws (1:InvalidOperationException o1, 2:MetaException o2) + throws (1:InvalidOperationException o1, 2:MetaException o2) void alter_table_with_environment_context(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2) - // alter table not only applies to future partitions but also cascade to existing partitions + /** alter table not only applies to future partitions but also cascade to existing partitions */ void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade) - throws (1:InvalidOperationException o1, 2:MetaException o2) - // the following applies to only tables that have partitions - // * See notes on DDL_TIME + throws (1:InvalidOperationException o1, 2:MetaException o2) + + /** + * The following applies to only tables that have partitions. + * See notes on DDL_TIME. + */ Partition add_partition(1:Partition new_part) - throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) Partition add_partition_with_environment_context(1:Partition new_part, 2:EnvironmentContext environment_context) throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, @@ -941,8 +1089,10 @@ service ThriftHiveMetastore extends fb303.FacebookService Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) throws(1:MetaException o1, 2:NoSuchObjectException o2) - // returns all the partitions for this table in reverse chronological order. - // If max parts is given then it will return only that many. + /** + * Returns all the partitions for this table in reverse chronological order. + * If max parts is given then it will return only that many. + */ list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) throws(1:NoSuchObjectException o1, 2:MetaException o2) list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, @@ -954,81 +1104,98 @@ service ThriftHiveMetastore extends fb303.FacebookService list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) throws(1:MetaException o2) - // get_partition*_ps methods allow filtering by a partial partition specification, - // as needed for dynamic partitions. The values that are not restricted should - // be empty strings. Nulls were considered (instead of "") but caused errors in - // generated Python code. The size of part_vals may be smaller than the - // number of partition columns - the unspecified values are considered the same - // as "". + /** + * get_partition*_ps methods allow filtering by a partial partition specification, + * as needed for dynamic partitions. The values that are not restricted should + * be empty strings. Nulls were considered (instead of "") but caused errors in + * generated Python code. The size of part_vals may be smaller than the + * number of partition columns - the unspecified values are considered the same + * as "". 
+ */ list get_partitions_ps(1:string db_name 2:string tbl_name - 3:list part_vals, 4:i16 max_parts=-1) + 3:list part_vals, 4:i16 max_parts=-1) throws(1:MetaException o1, 2:NoSuchObjectException o2) list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, - 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) list get_partition_names_ps(1:string db_name, - 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) - throws(1:MetaException o1, 2:NoSuchObjectException o2) + 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) + throws(1:MetaException o1, 2:NoSuchObjectException o2) - // get the partitions matching the given partition filter + /** Get the partitions matching the given partition filter. */ list get_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter, 4:i16 max_parts=-1) throws(1:MetaException o1, 2:NoSuchObjectException o2) - // List partitions as PartitionSpec instances. + /** List partitions as PartitionSpec instances. */ list get_part_specs_by_filter(1:string db_name 2:string tbl_name 3:string filter, 4:i32 max_parts=-1) throws(1:MetaException o1, 2:NoSuchObjectException o2) - // get the partitions matching the given partition filter - // unlike get_partitions_by_filter, takes serialized hive expression, and with that can work - // with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL. + /** + * Get the partitions matching the given partition filter. + * Unlike get_partitions_by_filter, takes serialized hive expression, and with that can work + * with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL. + */ PartitionsByExprResult get_partitions_by_expr(1:PartitionsByExprRequest req) - throws(1:MetaException o1, 2:NoSuchObjectException o2) + throws(1:MetaException o1, 2:NoSuchObjectException o2) - // get partitions give a list of partition names + /** Get partitions give a list of partition names. */ list get_partitions_by_names(1:string db_name 2:string tbl_name 3:list names) - throws(1:MetaException o1, 2:NoSuchObjectException o2) + throws(1:MetaException o1, 2:NoSuchObjectException o2) - // changes the partition to the new partition object. partition is identified from the part values - // in the new_part - // * See notes on DDL_TIME + /** + * Changes the partition to the new partition object. partition is identified from the part values + * in the new_part. + * See notes on DDL_TIME. + */ void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) - throws (1:InvalidOperationException o1, 2:MetaException o2) + throws (1:InvalidOperationException o1, 2:MetaException o2) - // change a list of partitions. All partitions are altered atomically and all - // prehooks are fired together followed by all post hooks + /** + * Change a list of partitions. All partitions are altered atomically and all + * prehooks are fired together followed by all post hooks. 
+ */ void alter_partitions(1:string db_name, 2:string tbl_name, 3:list new_parts) - throws (1:InvalidOperationException o1, 2:MetaException o2) - - void alter_partition_with_environment_context(1:string db_name, - 2:string tbl_name, 3:Partition new_part, - 4:EnvironmentContext environment_context) - throws (1:InvalidOperationException o1, 2:MetaException o2) - - // rename the old partition to the new partition object by changing old part values to the part values - // in the new_part. old partition is identified from part_vals. - // partition keys in new_part should be the same as those in old partition. - void rename_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:Partition new_part) - throws (1:InvalidOperationException o1, 2:MetaException o2) - - // returns whether or not the partition name is valid based on the value of the config - // hive.metastore.partition.name.whitelist.pattern + throws (1:InvalidOperationException o1, 2:MetaException o2) + + void alter_partition_with_environment_context(1:string db_name, 2:string tbl_name, + 3:Partition new_part, 4:EnvironmentContext environment_context) + throws (1:InvalidOperationException o1, 2:MetaException o2) + + /** + * Rename the old partition to the new partition object by changing old part values to the part + * values in the new_part. old partition is identified from part_vals. + * Partition keys in new_part should be the same as those in old partition. + */ + void rename_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, + 4:Partition new_part) + throws (1:InvalidOperationException o1, 2:MetaException o2) + + /** + * Returns whether or not the partition name is valid based on the value of the config + * hive.metastore.partition.name.whitelist.pattern. + */ bool partition_name_has_valid_characters(1:list part_vals, 2:bool throw_exception) - throws(1: MetaException o1) - - // gets the value of the configuration key in the metastore server. returns - // defaultValue if the key does not exist. if the configuration key does not - // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is - // thrown. + throws(1: MetaException o1) + + /** + * Gets the value of the configuration key in the metastore server. Returns + * defaultValue if the key does not exist. if the configuration key does not + * begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is + * thrown. + */ string get_config_value(1:string name, 2:string defaultValue) throws(1:ConfigValSecurityException o1) - // converts a partition name into a partition values array + /** Converts a partition name into a partition values array. 
   list partition_name_to_vals(1: string part_name)
                           throws(1: MetaException o1)
-  // converts a partition name into a partition specification (a mapping from
-  // the partition cols to the values)
+
+  /**
+   * Converts a partition name into a partition specification (a mapping from
+   * the partition cols to the values)
+   */
   map partition_name_to_spec(1: string part_name)
                           throws(1: MetaException o1)
 
@@ -1041,7 +1208,7 @@
                           3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
                           6: InvalidPartitionException o6)
 
-  //index
+  // Index
   Index add_index(1:Index new_index, 2: Table index_table)
                        throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
   void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx)
@@ -1056,7 +1223,7 @@
   list get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
                        throws(1:MetaException o2)
 
-  // column statistics interfaces
+  // Column statistics interfaces
 
   // update APIs persist the column statistics object(s) that are passed in. If statistics already
   // exists for one or more columns, the existing statistics will be overwritten. The update APIs
@@ -1124,23 +1291,30 @@
   bool create_role(1:Role role) throws(1:MetaException o1)
   bool drop_role(1:string role_name) throws(1:MetaException o1)
   list get_role_names() throws(1:MetaException o1)
-  // Deprecated, use grant_revoke_role()
+
+  /** Deprecated, use grant_revoke_role(). */
   bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
     4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
-  // Deprecated, use grant_revoke_role()
+
+  /** Deprecated, use grant_revoke_role(). */
   bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
                         throws(1:MetaException o1)
+
   list list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1)
   GrantRevokeRoleResponse grant_revoke_role(1:GrantRevokeRoleRequest request) throws(1:MetaException o1)
 
-  // get all role-grants for users/roles that have been granted the given role
-  // Note that in the returned list of RolePrincipalGrants, the roleName is
-  // redundant as it would match the role_name argument of this function
+  /**
+   * Get all role-grants for users/roles that have been granted the given role.
+   * Note that in the returned list of RolePrincipalGrants, the roleName is
+   * redundant as it would match the role_name argument of this function.
+   */
   GetPrincipalsInRoleResponse get_principals_in_role(1: GetPrincipalsInRoleRequest request) throws(1:MetaException o1)
+ */ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(1: GetRoleGrantsForPrincipalRequest request) throws(1:MetaException o1) PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name, @@ -1148,33 +1322,40 @@ service ThriftHiveMetastore extends fb303.FacebookService list list_privileges(1:string principal_name, 2:PrincipalType principal_type, 3: HiveObjectRef hiveObject) throws(1:MetaException o1) - // Deprecated, use grant_revoke_privileges() + /** Deprecated, use grant_revoke_privileges(). */ bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) - // Deprecated, use grant_revoke_privileges() + + /** Deprecated, use grant_revoke_privileges(). */ bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1) GrantRevokePrivilegeResponse grant_revoke_privileges(1:GrantRevokePrivilegeRequest request) throws(1:MetaException o1); - // this is used by metastore client to send UGI information to metastore server immediately - // after setting up a connection. + /** + * This is used by metastore client to send UGI information to metastore server immediately + * after setting up a connection. + */ list set_ugi(1:string user_name, 2:list group_names) throws (1:MetaException o1) //Authentication (delegation token) interfaces - // get metastore server delegation token for use from the map/reduce tasks to authenticate - // to metastore server + /** + * Get metastore server delegation token for use from the map/reduce tasks to authenticate + * to metastore server. + */ string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name) throws (1:MetaException o1) - // method to renew delegation token obtained from metastore server + /** Method to renew delegation token obtained from metastore server. */ i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1) - // method to cancel delegation token obtained from metastore server + /** Method to cancel delegation token obtained from metastore server. */ void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1) // Transaction and lock management calls - // Get just list of open transactions + + /** Get just list of open transactions. */ GetOpenTxnsResponse get_open_txns() - // Get list of open transactions with state (open, aborted) + + /** Get list of open transactions with state (open, aborted). */ GetOpenTxnsInfoResponse get_open_txns_info() OpenTxnsResponse open_txns(1:OpenTxnRequest rqst) void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1) @@ -1186,7 +1367,7 @@ service ThriftHiveMetastore extends fb303.FacebookService ShowLocksResponse show_locks(1:ShowLocksRequest rqst) void heartbeat(1:HeartbeatRequest ids) throws (1:NoSuchLockException o1, 2:NoSuchTxnException o2, 3:TxnAbortedException o3) HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns) - void compact(1:CompactionRequest rqst) + void compact(1:CompactionRequest rqst) ShowCompactResponse show_compact(1:ShowCompactRequest rqst) void add_dynamic_partitions(1:AddDynamicPartitions rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2) @@ -1201,17 +1382,22 @@ service ThriftHiveMetastore extends fb303.FacebookService // For storing info about archived partitions in parameters -// Whether the partition is archived +/** Whether the partition is archived. */ const string IS_ARCHIVED = "is_archived", -// The original location of the partition, before archiving. 
 
-// The original location of the partition, before archiving. After archiving,
-// this directory will contain the archive. When the partition
-// is dropped, this directory will be deleted
+
+/**
+ * The original location of the partition, before archiving. After archiving,
+ * this directory will contain the archive. When the partition
+ * is dropped, this directory will be deleted.
+ */
 const string ORIGINAL_LOCATION = "original_location",
 
-// Whether or not the table is considered immutable - immutable tables can only be
-// overwritten or created if unpartitioned, or if partitioned, partitions inside them
-// can only be overwritten or created. Immutability supports write-once and replace
-// semantics, but not append.
+/**
+ * Whether or not the table is considered immutable - immutable tables can only be
+ * overwritten or created if unpartitioned, or if partitioned, partitions inside them
+ * can only be overwritten or created. Immutability supports write-once and replace
+ * semantics, but not append.
+ */
 const string IS_IMMUTABLE = "immutable",
 
 // these should be needed only for backward compatibility with filestore
diff --git a/service/if/TCLIService.thrift b/service/if/TCLIService.thrift
index baf583f..99137fa 100644
--- a/service/if/TCLIService.thrift
+++ b/service/if/TCLIService.thrift
@@ -35,30 +35,32 @@ namespace java org.apache.hive.service.cli.thrift
 namespace cpp apache.hive.service.cli.thrift
 
-// List of protocol versions. A new token should be
-// added to the end of this list every time a change is made.
+/**
+ * List of protocol versions.
+ * A new token should be added to the end of this list every time a change is made.
+ */
 enum TProtocolVersion {
   HIVE_CLI_SERVICE_PROTOCOL_V1,
-
-  // V2 adds support for asynchronous execution
+
+  /** V2 adds support for asynchronous execution. */
   HIVE_CLI_SERVICE_PROTOCOL_V2
 
-  // V3 add varchar type, primitive type qualifiers
+  /** V3 adds varchar type, primitive type qualifiers. */
   HIVE_CLI_SERVICE_PROTOCOL_V3
 
-  // V4 add decimal precision/scale, char type
+  /** V4 adds decimal precision/scale, char type. */
   HIVE_CLI_SERVICE_PROTOCOL_V4
 
-  // V5 adds error details when GetOperationStatus returns in error state
+  /** V5 adds error details when GetOperationStatus returns in error state. */
   HIVE_CLI_SERVICE_PROTOCOL_V5
 
-  // V6 uses binary type for binary payload (was string) and uses columnar result set
+  /** V6 uses binary type for binary payload (was string) and uses columnar result set. */
  HIVE_CLI_SERVICE_PROTOCOL_V6
 
-  // V7 adds support for delegation token based connection
+  /** V7 adds support for delegation token based connection. */
   HIVE_CLI_SERVICE_PROTOCOL_V7
 
-  // V8 adds support for interval types
+  /** V8 adds support for interval types. */
   HIVE_CLI_SERVICE_PROTOCOL_V8
 }
 
@@ -86,7 +88,7 @@ enum TTypeId {
   INTERVAL_YEAR_MONTH_TYPE,
   INTERVAL_DAY_TIME_TYPE
 }
-  
+
 const set PRIMITIVE_TYPES = [
   TTypeId.BOOLEAN_TYPE,
   TTypeId.TINYINT_TYPE,
@@ -144,55 +146,56 @@ const map TYPE_NAMES = {
   TTypeId.INTERVAL_DAY_TIME_TYPE: "INTERVAL_DAY_TIME"
 }
 
-// Thrift does not support recursively defined types or forward declarations,
-// which makes it difficult to represent Hive's nested types.
-// To get around these limitations TTypeDesc employs a type list that maps
-// integer "pointers" to TTypeEntry objects. The following examples show
-// how different types are represented using this scheme:
-//
-// "INT":
-// TTypeDesc {
-//   types = [
-//     TTypeEntry.primitive_entry {
-//       type = INT_TYPE
-//     }
-//   ]
-// }
-//
-// "ARRAY":
-// TTypeDesc {
-//   types = [
-//     TTypeEntry.array_entry {
-//       object_type_ptr = 1
-//     },
-//     TTypeEntry.primitive_entry {
-//       type = INT_TYPE
-//     }
-//   ]
-// }
-//
-// "MAP":
-// TTypeDesc {
-//   types = [
-//     TTypeEntry.map_entry {
-//       key_type_ptr = 1
-//       value_type_ptr = 2
-//     },
-//     TTypeEntry.primitive_entry {
-//       type = INT_TYPE
-//     },
-//     TTypeEntry.primitive_entry {
-//       type = STRING_TYPE
-//     }
-//   ]
-// }
-
+/**
+ * Thrift does not support recursively defined types or forward declarations,
+ * which makes it difficult to represent Hive's nested types.
+ * To get around these limitations TTypeDesc employs a type list that maps
+ * integer "pointers" to TTypeEntry objects. The following examples show
+ * how different types are represented using this scheme:
+ *
+ * "INT":
+ * TTypeDesc {
+ *   types = [
+ *     TTypeEntry.primitive_entry {
+ *       type = INT_TYPE
+ *     }
+ *   ]
+ * }
+ *
+ * "ARRAY":
+ * TTypeDesc {
+ *   types = [
+ *     TTypeEntry.array_entry {
+ *       object_type_ptr = 1
+ *     },
+ *     TTypeEntry.primitive_entry {
+ *       type = INT_TYPE
+ *     }
+ *   ]
+ * }
+ *
+ * "MAP":
+ * TTypeDesc {
+ *   types = [
+ *     TTypeEntry.map_entry {
+ *       key_type_ptr = 1
+ *       value_type_ptr = 2
+ *     },
+ *     TTypeEntry.primitive_entry {
+ *       type = INT_TYPE
+ *     },
+ *     TTypeEntry.primitive_entry {
+ *       type = STRING_TYPE
+ *     }
+ *   ]
+ * }
+ */
 typedef i32 TTypeEntryPtr
 
-// Valid TTypeQualifiers key names
+/** Valid TTypeQualifiers key names */
 const string CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength"
 
-// Type qualifier key name for decimal
+/** Type qualifier key name for decimal */
 const string PRECISION = "precision"
 const string SCALE = "scale"
 
@@ -201,46 +204,51 @@ union TTypeQualifierValue {
   2: optional string stringValue
 }
 
-// Type qualifiers for primitive type.
+/**
+ * Type qualifiers for primitive types.
+ * Can be used to store metadata about a column of a primitive type (e.g. precision for a DECIMAL).
+ */
 struct TTypeQualifiers {
   1: required map qualifiers
 }
 
-// Type entry for a primitive type.
+/** Type entry for a primitive type. */
 struct TPrimitiveTypeEntry {
-  // The primitive type token. This must satisfy the condition
-  // that type is in the PRIMITIVE_TYPES set.
+  /**
+   * The primitive type token.
+   * This must satisfy the condition that type is in the PRIMITIVE_TYPES set.
+   */
   1: required TTypeId type
   2: optional TTypeQualifiers typeQualifiers
 }
 
-// Type entry for an ARRAY type.
+/** Type entry for an ARRAY type. */
 struct TArrayTypeEntry {
   1: required TTypeEntryPtr objectTypePtr
 }
 
-// Type entry for a MAP type.
+/** Type entry for a MAP type. */
 struct TMapTypeEntry {
   1: required TTypeEntryPtr keyTypePtr
   2: required TTypeEntryPtr valueTypePtr
 }
 
-// Type entry for a STRUCT type.
+/** Type entry for a STRUCT type. */
 struct TStructTypeEntry {
   1: required map nameToTypePtr
 }
 
-// Type entry for a UNIONTYPE type.
+/** Type entry for a UNIONTYPE type. */
 struct TUnionTypeEntry {
   1: required map nameToTypePtr
 }
 
 struct TUserDefinedTypeEntry {
-  // The fully qualified name of the class implementing this type.
+  /** The fully qualified name of the class implementing this type. */
   1: required string typeClassName
 }
 union TTypeEntry {
   1: TPrimitiveTypeEntry primitiveEntry
   2: TArrayTypeEntry arrayEntry
@@ -250,81 +258,87 @@ union TTypeEntry {
   6: TUserDefinedTypeEntry userDefinedTypeEntry
 }
 
-// Type descriptor for columns.
+/** Type descriptor for columns. */
 struct TTypeDesc {
-  // The "top" type is always the first element of the list.
-  // If the top type is an ARRAY, MAP, STRUCT, or UNIONTYPE
-  // type, then subsequent elements represent nested types.
+  /**
+   * The "top" type is always the first element of the list.
+   * If the top type is an ARRAY, MAP, STRUCT, or UNIONTYPE
+   * type, then subsequent elements represent nested types.
+   */
   1: required list<TTypeEntry> types
 }
 
-// A result set column descriptor.
+/** A result set column descriptor. */
 struct TColumnDesc {
-  // The name of the column
+  /** The name of the column. */
   1: required string columnName
 
-  // The type descriptor for this column
+  /** The type descriptor for this column. */
   2: required TTypeDesc typeDesc
- 
-  // The ordinal position of this column in the schema
+
+  /** The ordinal position of this column in the schema. */
   3: required i32 position
 
   4: optional string comment
 }
 
-// Metadata used to describe the schema (column names, types, comments)
-// of result sets.
+/**
+ * Metadata used to describe the schema (column names, types, comments)
+ * of result sets.
+ */
 struct TTableSchema {
   1: required list<TColumnDesc> columns
 }
 
-// A Boolean column value.
+/** A Boolean column value. */
 struct TBoolValue {
-  // NULL if value is unset.
+  /** NULL if value is unset. */
   1: optional bool value
}
 
-// A Byte column value.
+/** A Byte column value. */
 struct TByteValue {
-  // NULL if value is unset.
+  /** NULL if value is unset. */
   1: optional byte value
 }
 
-// A signed, 16 bit column value.
+/** A signed, 16 bit column value. */
 struct TI16Value {
-  // NULL if value is unset
+  /** NULL if value is unset. */
   1: optional i16 value
 }
 
-// A signed, 32 bit column value
+/** A signed, 32 bit column value. */
 struct TI32Value {
-  // NULL if value is unset
+  /** NULL if value is unset. */
   1: optional i32 value
 }
 
-// A signed 64 bit column value
+/** A signed, 64 bit column value. */
 struct TI64Value {
-  // NULL if value is unset
+  /** NULL if value is unset. */
   1: optional i64 value
 }
 
-// A floating point 64 bit column value
+/** A floating point 64 bit column value. */
 struct TDoubleValue {
-  // NULL if value is unset
+  /** NULL if value is unset. */
   1: optional double value
 }
 
 struct TStringValue {
-  // NULL if value is unset
+  /** NULL if value is unset. */
   1: optional string value
 }
 
-// A single column value in a result set.
-// Note that Hive's type system is richer than Thrift's,
-// so in some cases we have to map multiple Hive types
-// to the same Thrift type. On the client-side this is
-// disambiguated by looking at the Schema of the
-// result set.
+/**
+ * A single column value in a result set.
+ * Note that Hive's type system is richer than Thrift's,
+ * so in some cases we have to map multiple Hive types
+ * to the same Thrift type. On the client-side this is
+ * disambiguated by looking at the Schema of the
+ * result set.
+ */
 union TColumnValue {
   1: TBoolValue   boolVal      // BOOLEAN
   2: TByteValue   byteVal      // TINYINT
@@ -335,7 +349,7 @@ union TColumnValue {
   7: TStringValue stringVal    // STRING, LIST, MAP, STRUCT, UNIONTYPE, BINARY, DECIMAL, NULL, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME
 }
 
-// Represents a row in a rowset.
+/** Represents a row in a rowset. */
 struct TRow {
   1: required list<TColumnValue> colVals
 }
 
@@ -380,11 +394,13 @@ struct TBinaryColumn {
   2: required binary nulls
 }
 
-// Note that Hive's type system is richer than Thrift's,
-// so in some cases we have to map multiple Hive types
-// to the same Thrift type. On the client-side this is
-// disambiguated by looking at the Schema of the
-// result set.
+/**
+ * Note that Hive's type system is richer than Thrift's,
+ * so in some cases we have to map multiple Hive types
+ * to the same Thrift type. On the client-side this is
+ * disambiguated by looking at the Schema of the
+ * result set.
+ */
 union TColumn {
   1: TBoolColumn   boolVal   // BOOLEAN
   2: TByteColumn   byteVal   // TINYINT
@@ -396,15 +412,15 @@ union TColumn {
   8: TBinaryColumn binaryVal // BINARY
 }
 
-// Represents a rowset
+/** Represents a rowset. */
 struct TRowSet {
-  // The starting row offset of this rowset.
+  /** The starting row offset of this rowset. */
   1: required i64 startRowOffset
   2: required list<TRow> rows
   3: optional list<TColumn> columns
 }
 
-// The return status code contained in each response.
+/** The return status code contained in each response. */
 enum TStatusCode {
   SUCCESS_STATUS,
   SUCCESS_WITH_INFO_STATUS,
@@ -413,89 +429,116 @@ enum TStatusCode {
   INVALID_HANDLE_STATUS
 }
 
-// The return status of a remote request
+/** The return status of a remote request. */
 struct TStatus {
   1: required TStatusCode statusCode
 
-  // If status is SUCCESS_WITH_INFO, info_msgs may be populated with
-  // additional diagnostic information.
+  /**
+   * If statusCode is SUCCESS_WITH_INFO, infoMessages may be populated with
+   * additional diagnostic information.
+   */
   2: optional list<string> infoMessages
 
-  // If status is ERROR, then the following fields may be set
-  3: optional string sqlState  // as defined in the ISO/IEF CLI specification
-  4: optional i32 errorCode    // internal error code
+  /**
+   * sqlState as defined in the ISO/IEC CLI specification.
+   * Only set if statusCode is ERROR_STATUS.
+   **/
+  3: optional string sqlState
+
+  /**
+   * Internal error code.
+   * Only set if statusCode is ERROR_STATUS.
+   **/
+  4: optional i32 errorCode
+
+  /**
+   * Error message.
+   * Only set if statusCode is ERROR_STATUS.
+   **/
   5: optional string errorMessage
+
 }
 
-// The state of an operation (i.e. a query or other
-// asynchronous operation that generates a result set)
-// on the server.
+/**
+ * The state of an operation (i.e. a query or other
+ * asynchronous operation that generates a result set)
+ * on the server.
+ */
 enum TOperationState {
-  // The operation has been initialized
+  /** The operation has been initialized. */
  INITIALIZED_STATE,
 
-  // The operation is running. In this state the result
-  // set is not available.
+  /** The operation is running. In this state the result set is not available. */
   RUNNING_STATE,
 
-  // The operation has completed. When an operation is in
-  // this state its result set may be fetched.
+  /**
+   * The operation has completed.
+   * When an operation is in this state its result set may be fetched.
+   **/
   FINISHED_STATE,
 
-  // The operation was canceled by a client
+  /** The operation was canceled by a client. */
   CANCELED_STATE,
 
-  // The operation was closed by a client
+  /** The operation was closed by a client. */
   CLOSED_STATE,
 
-  // The operation failed due to an error
+  /** The operation failed due to an error. */
   ERROR_STATE,
 
-  // The operation is in an unrecognized state
+  /** The operation is in an unrecognized state. */
   UKNOWN_STATE,
 
-  // The operation is in an pending state
+  /** The operation is in a pending state. */
   PENDING_STATE,
 }
 
-// A string identifier. This is interpreted literally.
+/** A string identifier. This is interpreted literally. */
 typedef string TIdentifier
 
-// A search pattern.
-//
-// Valid search pattern characters:
-// '_': Any single character.
-// '%': Any sequence of zero or more characters.
-// '\': Escape character used to include special characters,
-//      e.g. '_', '%', '\'. If a '\' precedes a non-special
-//      character it has no special meaning and is interpreted
-//      literally.
+/**
+ * A search pattern.
+ *
+ * Valid search pattern characters:
+ * '_': Any single character.
+ * '%': Any sequence of zero or more characters.
+ * '\': Escape character used to include special characters,
+ *      e.g. '_', '%', '\'. If a '\' precedes a non-special
+ *      character it has no special meaning and is interpreted
+ *      literally.
+ */
 typedef string TPattern
 
-
-// A search pattern or identifier. Used as input
-// parameter for many of the catalog functions.
+/**
+ * A search pattern or identifier. Used as input
+ * parameter for many of the catalog functions.
+ */
 typedef string TPatternOrIdentifier
 
 struct THandleIdentifier {
-  // 16 byte globally unique identifier
-  // This is the public ID of the handle and
-  // can be used for reporting.
+  /**
+   * 16 byte globally unique identifier.
+   * This is the public ID of the handle and
+   * can be used for reporting.
+   */
   1: required binary guid,
 
-  // 16 byte secret generated by the server
-  // and used to verify that the handle is not
-  // being hijacked by another user.
+  /**
+   * 16 byte secret generated by the server
+   * and used to verify that the handle is not
+   * being hijacked by another user.
+   */
   2: required binary secret,
 }
 
-// Client-side handle to persistent
-// session information on the server-side.
+/**
+ * Client-side handle to persistent session information on the server-side.
+ */
 struct TSessionHandle {
   1: required THandleIdentifier sessionId
 }
 
-// The subtype of an OperationHandle.
+/** The subtype of an OperationHandle. */
 enum TOperationType {
   EXECUTE_STATEMENT,
   GET_TYPE_INFO,
@@ -508,83 +551,98 @@ enum TOperationType {
   UNKNOWN,
 }
 
-// Client-side reference to a task running
-// asynchronously on the server.
+/**
+ * Client-side reference to a task running
+ * asynchronously on the server.
+ */
 struct TOperationHandle {
   1: required THandleIdentifier operationId
   2: required TOperationType operationType
 
-  // If hasResultSet = TRUE, then this operation
-  // generates a result set that can be fetched.
-  // Note that the result set may be empty.
-  //
-  // If hasResultSet = FALSE, then this operation
-  // does not generate a result set, and calling
-  // GetResultSetMetadata or FetchResults against
-  // this OperationHandle will generate an error.
+  /**
+   * If hasResultSet = TRUE, then this operation
+   * generates a result set that can be fetched.
+   * Note that the result set may be empty.
+   *
+   * If hasResultSet = FALSE, then this operation
+   * does not generate a result set, and calling
+   * GetResultSetMetadata or FetchResults against
+   * this OperationHandle will generate an error.
+   */
   3: required bool hasResultSet
 
-  // For operations that don't generate result sets,
-  // modifiedRowCount is either:
-  //
-  // 1) The number of rows that were modified by
-  //    the DML operation (e.g. number of rows inserted,
-  //    number of rows deleted, etc).
-  //
-  // 2) 0 for operations that don't modify or add rows.
-  //
-  // 3) < 0 if the operation is capable of modifiying rows,
-  //    but Hive is unable to determine how many rows were
-  //    modified. For example, Hive's LOAD DATA command
-  //    doesn't generate row count information because
-  //    Hive doesn't inspect the data as it is loaded.
-  //
-  // modifiedRowCount is unset if the operation generates
-  // a result set.
+  /**
+   * For operations that don't generate result sets,
+   * modifiedRowCount is either:
+   *
+   * 1) The number of rows that were modified by
+   *    the DML operation (e.g. number of rows inserted,
+   *    number of rows deleted, etc).
+   *
+   * 2) 0 for operations that don't modify or add rows.
+   *
+   * 3) < 0 if the operation is capable of modifying rows,
+   *    but Hive is unable to determine how many rows were
+   *    modified. For example, Hive's LOAD DATA command
+   *    doesn't generate row count information because
+   *    Hive doesn't inspect the data as it is loaded.
+   *
+   * modifiedRowCount is unset if the operation generates
+   * a result set.
+   */
   4: optional double modifiedRowCount
 }
 
-// OpenSession()
-//
-// Open a session (connection) on the server against
-// which operations may be executed.
+/**
 * Request to open a session (connection) on the server against
 * which operations may be executed.
+ */
 struct TOpenSessionReq {
-  // The version of the HiveServer2 protocol that the client is using.
+  /** The version of the HiveServer2 protocol that the client is using. */
   1: required TProtocolVersion client_protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8
 
-  // Username and password for authentication.
-  // Depending on the authentication scheme being used,
-  // this information may instead be provided by a lower
-  // protocol layer, in which case these fields may be
-  // left unset.
+  /**
+   * Username for authentication.
+   * Depending on the authentication scheme being used,
+   * this information may instead be provided by a lower
+   * protocol layer, in which case this and the password
+   * field may be left unset.
+   */
   2: optional string username
+
+  /**
+   * Password for authentication.
+   * Depending on the authentication scheme being used,
+   * this information may instead be provided by a lower
+   * protocol layer, in which case this and the username
+   * field may be left unset.
+   */
   3: optional string password
 
-  // Configuration overlay which is applied when the session is
-  // first created.
+  /** Configuration overlay which is applied when the session is first created. */
   4: optional map<string, string> configuration
 }
 
 struct TOpenSessionResp {
   1: required TStatus status
 
-  // The protocol version that the server is using.
+  /** The protocol version that the server is using. */
   2: required TProtocolVersion serverProtocolVersion = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8
 
-  // Session Handle
+  /** Session Handle. */
   3: optional TSessionHandle sessionHandle
 
-  // The configuration settings for this session.
+  /** The configuration settings for this session. */
   4: optional map<string, string> configuration
 }
 
-// CloseSession()
-//
-// Closes the specified session and frees any resources
-// currently allocated to that session. Any open
-// operations in that session will be canceled.
+/**
+ * Request to close the specified session and free any resources
+ * currently allocated to that session. Any open
+ * operations in that session will be canceled.
+ */
 struct TCloseSessionReq {
   1: required TSessionHandle sessionHandle
 }
 
@@ -655,13 +713,12 @@ union TGetInfoValue {
   6: i64 lenValue
 }
 
-// GetInfo()
-//
-// This function is based on ODBC's CLIGetInfo() function.
-// The function returns general information about the data source
-// using the same keys as ODBC.
+/**
+ * Request to return general information about the data source.
+ * This function is based on ODBC's CLIGetInfo() function and uses the same keys.
+ **/
 struct TGetInfoReq {
-  // The sesssion to run this request against
+  /** The session to run this request against. */
   1: required TSessionHandle sessionHandle
 
   2: required TGetInfoType infoType
@@ -673,27 +730,28 @@ struct TGetInfoResp {
   2: required TGetInfoValue infoValue
 }
 
-
-// ExecuteStatement()
-//
-// Execute a statement.
-// The returned OperationHandle can be used to check on the
-// status of the statement, and to fetch results once the
-// statement has finished executing.
+/**
 * Request to execute a statement.
 * The returned OperationHandle can be used to check on the
 * status of the statement, and to fetch results once the
 * statement has finished executing.
+ */
 struct TExecuteStatementReq {
-  // The session to execute the statement against
+  /** The session to execute the statement against. */
   1: required TSessionHandle sessionHandle
 
-  // The statement to be executed (DML, DDL, SET, etc)
+  /** The statement to be executed (DML, DDL, SET, etc). */
   2: required string statement
 
-  // Configuration properties that are overlayed on top of the
-  // the existing session configuration before this statement
-  // is executed. These properties apply to this statement
-  // only and will not affect the subsequent state of the Session.
+  /**
+   * Configuration properties that are overlaid on top of the
+   * existing session configuration before this statement
+   * is executed. These properties apply to this statement
+   * only and will not affect the subsequent state of the Session.
+   */
   3: optional map<string, string> confOverlay
- 
-  // Execute asynchronously when runAsync is true
+
+  /** Execute asynchronously when runAsync is true. */
   4: optional bool runAsync = false
 }
 
@@ -702,38 +760,38 @@ struct TExecuteStatementResp {
   2: optional TOperationHandle operationHandle
 }
 
-// GetTypeInfo()
-//
-// Get information about types supported by the HiveServer instance.
-// The information is returned as a result set which can be fetched
-// using the OperationHandle provided in the response.
-//
-// Refer to the documentation for ODBC's CLIGetTypeInfo function for
-// the format of the result set.
+/**
 * Request to get information about types supported by the HiveServer instance.
 * The information is returned as a result set which can be fetched
 * using the OperationHandle provided in the response.
 *
 * Refer to the documentation for ODBC's CLIGetTypeInfo function for
 * the format of the result set.
+ */
 struct TGetTypeInfoReq {
-  // The session to run this request against.
+  /** The session to run this request against. */
   1: required TSessionHandle sessionHandle
 }
 
 struct TGetTypeInfoResp {
   1: required TStatus status
   2: optional TOperationHandle operationHandle
-} 
+}
 
-// GetCatalogs()
-//
-// Returns the list of catalogs (databases)
-// Results are ordered by TABLE_CATALOG
-//
-// Resultset columns :
-// col1
-// name: TABLE_CAT
-// type: STRING
-// desc: Catalog name. NULL if not applicable.
-//
+/**
 * Request to return the list of catalogs.
 * This concept is not currently supported in Hive, so it will
 * always return an empty result set.
 *
 * Resultset columns:
 * col1
 * name: TABLE_CAT
 * type: STRING
 * desc: Catalog name. NULL if not applicable.
+ */
 struct TGetCatalogsReq {
-  // Session to run this request against
+  /** Session to run this request against. */
   1: required TSessionHandle sessionHandle
 }
 
@@ -743,26 +801,26 @@ struct TGetCatalogsResp {
 }
 
-// GetSchemas()
-//
-// Retrieves the schema names available in this database.
-// The results are ordered by TABLE_CATALOG and TABLE_SCHEM.
-// col1
-// name: TABLE_SCHEM
-// type: STRING
-// desc: schema name
-// col2
-// name: TABLE_CATALOG
-// type: STRING
-// desc: catalog name
+/**
 * Request to retrieve the schema names available in this database.
 * The results are ordered by TABLE_CATALOG and TABLE_SCHEM.
 * col1
 * name: TABLE_SCHEM
 * type: STRING
 * desc: schema name
 * col2
 * name: TABLE_CATALOG
 * type: STRING
 * desc: catalog name
+ */
 struct TGetSchemasReq {
-  // Session to run this request against
+  /** Session to run this request against. */
   1: required TSessionHandle sessionHandle
 
-  // Name of the catalog. Must not contain a search pattern.
+  /** Name of the catalog. Must not contain a search pattern. */
   2: optional TIdentifier catalogName
 
-  // schema name or pattern
+  /** Schema name or pattern. */
   3: optional TPatternOrIdentifier schemaName
 }
 
@@ -771,58 +829,57 @@ struct TGetSchemasResp {
   2: optional TOperationHandle operationHandle
 }
 
-
-// GetTables()
-//
-// Returns a list of tables with catalog, schema, and table
-// type information. The information is returned as a result
-// set which can be fetched using the OperationHandle
-// provided in the response.
-// Results are ordered by TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, and TABLE_NAME
-//
-// Result Set Columns:
-//
-// col1
-// name: TABLE_CAT
-// type: STRING
-// desc: Catalog name. NULL if not applicable.
-//
-// col2
-// name: TABLE_SCHEM
-// type: STRING
-// desc: Schema name.
-//
-// col3
-// name: TABLE_NAME
-// type: STRING
-// desc: Table name.
-//
-// col4
-// name: TABLE_TYPE
-// type: STRING
-// desc: The table type, e.g. "TABLE", "VIEW", etc.
-//
-// col5
-// name: REMARKS
-// type: STRING
-// desc: Comments about the table
-//
+/**
 * Request to return a list of tables with catalog, schema, and table
 * type information. The information is returned as a result
 * set which can be fetched using the OperationHandle provided in the response.
 * Results are ordered by TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, and TABLE_NAME.
 *
 * Result Set Columns:
 *
 * col1
 * name: TABLE_CAT
 * type: STRING
 * desc: Catalog name. NULL if not applicable.
 *
 * col2
 * name: TABLE_SCHEM
 * type: STRING
 * desc: Schema name.
 *
 * col3
 * name: TABLE_NAME
 * type: STRING
 * desc: Table name.
 *
 * col4
 * name: TABLE_TYPE
 * type: STRING
 * desc: The table type, e.g. "TABLE", "VIEW", etc.
 *
 * col5
 * name: REMARKS
 * type: STRING
 * desc: Comments about the table
+ */
 struct TGetTablesReq {
-  // Session to run this request against
+  /** Session to run this request against. */
   1: required TSessionHandle sessionHandle
 
-  // Name of the catalog or a search pattern.
+  /** Name of the catalog or a search pattern. */
   2: optional TPatternOrIdentifier catalogName
 
-  // Name of the schema or a search pattern.
+  /** Name of the schema or a search pattern. */
   3: optional TPatternOrIdentifier schemaName
 
-  // Name of the table or a search pattern.
+  /** Name of the table or a search pattern. */
   4: optional TPatternOrIdentifier tableName
 
-  // List of table types to match
-  // e.g. "TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY",
-  //      "LOCAL TEMPORARY", "ALIAS", "SYNONYM", etc.
+  /**
+   * List of table types to match
+   * e.g. "TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY",
"TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY", + * "LOCAL TEMPORARY", "ALIAS", "SYNONYM", etc. + */ 5: optional list tableTypes } @@ -832,17 +889,17 @@ struct TGetTablesResp { } -// GetTableTypes() -// -// Returns the table types available in this database. -// The results are ordered by table type. -// -// col1 -// name: TABLE_TYPE -// type: STRING -// desc: Table type name. +/** + * Request to return the table types available in this database. + * The results are ordered by table type. + * + * col1 + * name: TABLE_TYPE + * type: STRING + * desc: Table type name. + */ struct TGetTableTypesReq { - // Session to run this request against + /** Session to run this request against. */ 1: required TSessionHandle sessionHandle } @@ -851,32 +908,29 @@ struct TGetTableTypesResp { 2: optional TOperationHandle operationHandle } - -// GetColumns() -// -// Returns a list of columns in the specified tables. -// The information is returned as a result set which can be fetched -// using the OperationHandle provided in the response. -// Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME, -// and ORDINAL_POSITION. -// -// Result Set Columns are the same as those for the ODBC CLIColumns -// function. -// +/** + * Request to returns a list of columns in the specified tables. + * The information is returned as a result set which can be fetched + * using the OperationHandle provided in the response. + * Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME, + * and ORDINAL_POSITION. + * + * Result set columns are the same as those for the ODBC CLIColumns function. + */ struct TGetColumnsReq { - // Session to run this request against + /** Session to run this request against. */ 1: required TSessionHandle sessionHandle - // Name of the catalog. Must not contain a search pattern. + /** Name of the catalog. Must not contain a search pattern. */ 2: optional TIdentifier catalogName - // Schema name or search pattern + /** Schema name or search pattern. */ 3: optional TPatternOrIdentifier schemaName - // Table name or search pattern + /** Table name or search pattern. */ 4: optional TPatternOrIdentifier tableName - // Column name or search pattern + /** Column name or search pattern. */ 5: optional TPatternOrIdentifier columnName } @@ -885,82 +939,84 @@ struct TGetColumnsResp { 2: optional TOperationHandle operationHandle } - -// GetFunctions() -// -// Returns a list of functions supported by the data source. The -// behavior of this function matches -// java.sql.DatabaseMetaData.getFunctions() both in terms of -// inputs and outputs. -// -// Result Set Columns: -// -// col1 -// name: FUNCTION_CAT -// type: STRING -// desc: Function catalog (may be null) -// -// col2 -// name: FUNCTION_SCHEM -// type: STRING -// desc: Function schema (may be null) -// -// col3 -// name: FUNCTION_NAME -// type: STRING -// desc: Function name. This is the name used to invoke the function. -// -// col4 -// name: REMARKS -// type: STRING -// desc: Explanatory comment on the function. -// -// col5 -// name: FUNCTION_TYPE -// type: SMALLINT -// desc: Kind of function. One of: -// * functionResultUnknown - Cannot determine if a return value or a table -// will be returned. -// * functionNoTable - Does not a return a table. -// * functionReturnsTable - Returns a table. -// -// col6 -// name: SPECIFIC_NAME -// type: STRING -// desc: The name which uniquely identifies this function within its schema. -// In this case this is the fully qualified class name of the class -// that implements this function. 
-//
+/**
 * Request to return a list of functions supported by the data source.
 * The behavior of this function matches java.sql.DatabaseMetaData.getFunctions()
 * both in terms of inputs and outputs.
 *
 * Result Set Columns:
 *
 * col1
 * name: FUNCTION_CAT
 * type: STRING
 * desc: Function catalog (may be null)
 *
 * col2
 * name: FUNCTION_SCHEM
 * type: STRING
 * desc: Function schema (may be null)
 *
 * col3
 * name: FUNCTION_NAME
 * type: STRING
 * desc: Function name. This is the name used to invoke the function.
 *
 * col4
 * name: REMARKS
 * type: STRING
 * desc: Explanatory comment on the function.
 *
 * col5
 * name: FUNCTION_TYPE
 * type: SMALLINT
 * desc: Kind of function. One of:
 *   * functionResultUnknown - Cannot determine if a return value or a table
 *     will be returned.
 *   * functionNoTable - Does not return a table.
 *   * functionReturnsTable - Returns a table.
 *
 * col6
 * name: SPECIFIC_NAME
 * type: STRING
 * desc: The name which uniquely identifies this function within its schema.
 *       In this case this is the fully qualified class name of the class
 *       that implements this function.
+ */
 struct TGetFunctionsReq {
-  // Session to run this request against
+  /** Session to run this request against. */
   1: required TSessionHandle sessionHandle
 
-  // A catalog name; must match the catalog name as it is stored in the
-  // database; "" retrieves those without a catalog; null means
-  // that the catalog name should not be used to narrow the search.
+  /**
   * A catalog name. Must match the catalog name as it is stored in the database.
   * "" retrieves those without a catalog.
   * NULL means that the catalog name should not be used to narrow the search.
+   */
  2: optional TIdentifier catalogName
 
-  // A schema name pattern; must match the schema name as it is stored
-  // in the database; "" retrieves those without a schema; null means
-  // that the schema name should not be used to narrow the search.
+  /**
   * A schema name pattern. Must match the schema name as it is stored in the database.
   * "" retrieves those without a schema.
   * NULL means that the schema name should not be used to narrow the search.
+   */
   3: optional TPatternOrIdentifier schemaName
 
-  // A function name pattern; must match the function name as it is stored
-  // in the database.
-  4: required TPatternOrIdentifier functionName 
+  /**
   * A function name pattern. Must match the function name as it is stored in the database.
+   */
+  4: required TPatternOrIdentifier functionName
 }
 
 struct TGetFunctionsResp {
   1: required TStatus status
   2: optional TOperationHandle operationHandle
 }
 
- 
-// GetOperationStatus()
-//
-// Get the status of an operation running on the server.
+
+/**
 * Request to get the status of an operation running on the server.
+ **/
 struct TGetOperationStatusReq {
-  // Session to run this request against
+  /** Operation to get the status of. */
   1: required TOperationHandle operationHandle
 }
 
@@ -968,24 +1024,32 @@ struct TGetOperationStatusResp {
   1: required TStatus status
   2: optional TOperationState operationState
 
-  // If operationState is ERROR_STATE, then the following fields may be set
-  // sqlState as defined in the ISO/IEF CLI specification
+  /**
   * sqlState as defined in the ISO/IEC CLI specification.
   * Only set if operationState is ERROR_STATE.
+   **/
   3: optional string sqlState
 
-  // Internal error code
+  /**
   * Internal error code.
   * Only set if operationState is ERROR_STATE.
+   **/
   4: optional i32 errorCode
 
-  // Error message
+  /**
   * Error message.
   * Only set if operationState is ERROR_STATE.
+   **/
   5: optional string errorMessage
 }
 
-// CancelOperation()
-//
-// Cancels processing on the specified operation handle and
-// frees any resources which were allocated.
+/**
 * Request to cancel an operation.
 * This will free any resources that were allocated for this operation.
+ **/
 struct TCancelOperationReq {
-  // Operation to cancel
+  /** Operation to cancel. */
   1: required TOperationHandle operationHandle
 }
 
@@ -993,14 +1057,12 @@ struct TCancelOperationResp {
   1: required TStatus status
 }
 
-
-// CloseOperation()
-//
-// Given an operation in the FINISHED, CANCELED,
-// or ERROR states, CloseOperation() will free
-// all of the resources which were allocated on
-// the server to service the operation.
+/**
 * Request to close an operation. If the specified operation is in the FINISHED, CANCELED, or ERROR
 * states, this will free all the resources which were allocated on the server for this operation.
+ **/
 struct TCloseOperationReq {
+  /** Operation to close. */
   1: required TOperationHandle operationHandle
 }
 
@@ -1009,126 +1071,160 @@ struct TCloseOperationResp {
   1: required TStatus status
 }
 
-// GetResultSetMetadata()
-//
-// Retrieves schema information for the specified operation
+/**
 * Request to retrieve the schema information for a specified operation.
+ **/
 struct TGetResultSetMetadataReq {
-  // Operation for which to fetch result set schema information
+  /** Operation for which to fetch result set schema information. */
   1: required TOperationHandle operationHandle
 }
 
+/**
 * Response to a request to retrieve schema information for an operation.
+ **/
 struct TGetResultSetMetadataResp {
+  /** Status of the request. */
   1: required TStatus status
+
+  /** Schema of the result of the specified operation. */
   2: optional TTableSchema schema
 }
 
 enum TFetchOrientation {
-  // Get the next rowset. The fetch offset is ignored.
+  /** Get the next rowset. The fetch offset is ignored. */
   FETCH_NEXT,
 
-  // Get the previous rowset. The fetch offset is ignored.
-  // NOT SUPPORTED
+  /**
   * Get the previous rowset. The fetch offset is ignored.
   * NOT SUPPORTED.
+   **/
   FETCH_PRIOR,
 
-  // Return the rowset at the given fetch offset relative
-  // to the curren rowset.
-  // NOT SUPPORTED
+  /**
   * Return the rowset at the given fetch offset relative to the current rowset.
   * NOT SUPPORTED.
+   **/
   FETCH_RELATIVE,
 
-  // Return the rowset at the specified fetch offset.
-  // NOT SUPPORTED
+  /**
   * Return the rowset at the specified fetch offset.
   * NOT SUPPORTED.
+   **/
   FETCH_ABSOLUTE,
 
-  // Get the first rowset in the result set.
+  /** Get the first rowset in the result set. */
   FETCH_FIRST,
 
-  // Get the last rowset in the result set.
-  // NOT SUPPORTED
+  /**
   * Get the last rowset in the result set.
   * NOT SUPPORTED.
+   **/
   FETCH_LAST
 }
 
-// FetchResults()
-//
-// Fetch rows from the server corresponding to
-// a particular OperationHandle.
+/**
 * Request to fetch rows from the server for a particular OperationHandle.
+ **/
 struct TFetchResultsReq {
-  // Operation from which to fetch results.
+  /** Operation from which to fetch results. */
   1: required TOperationHandle operationHandle
 
-  // The fetch orientation. For V1 this must be either
-  // FETCH_NEXT or FETCH_FIRST. Defaults to FETCH_NEXT.
+  /**
   * The fetch orientation.
   * For V1 this must be either FETCH_NEXT or FETCH_FIRST.
   * Defaults to FETCH_NEXT.
+   **/
   2: required TFetchOrientation orientation = TFetchOrientation.FETCH_NEXT
- 
-  // Max number of rows that should be returned in
-  // the rowset.
+
+  /** Max number of rows that should be returned in the rowset. */
   3: required i64 maxRows
 
-  // The type of a fetch results request. 0 represents Query output. 1 represents Log
+  /** The type of a fetch results request. 0 represents Query output. 1 represents Log. */
   4: optional i16 fetchType = 0
 }
 
+/**
 * Response to a request to fetch rows.
+ **/
 struct TFetchResultsResp {
+  /** Status of the request. */
   1: required TStatus status
 
-  // TRUE if there are more rows left to fetch from the server.
+  /** TRUE if there are more rows left to fetch from the server. */
   2: optional bool hasMoreRows
 
-  // The rowset. This is optional so that we have the
-  // option in the future of adding alternate formats for
-  // representing result set data, e.g. delimited strings,
-  // binary encoded, etc.
+  /**
   * The rowset.
   * This is optional so that we have the option in the future of adding alternate formats for
   * representing result set data, e.g. delimited strings, binary encoded, etc.
+   **/
   3: optional TRowSet results
 }
 
-// GetDelegationToken()
-// Retrieve delegation token for the current user
+/**
 * Request to get a delegation token for the current user.
+ **/
 struct TGetDelegationTokenReq {
-  // session handle
+  /** Session handle. */
   1: required TSessionHandle sessionHandle
 
-  // userid for the proxy user
+  /** User id for the proxy user. */
   2: required string owner
 
-  // designated renewer userid
+  /** Designated renewer user id. */
   3: required string renewer
 }
 
+/**
 * Response to the request for a delegation token.
+ **/
 struct TGetDelegationTokenResp {
-  // status of the request
+  /** Status of the request. */
   1: required TStatus status
 
-  // delegation token string
+  /** Delegation token string. */
   2: optional string delegationToken
 }
 
-// CancelDelegationToken()
-// Cancel the given delegation token
+
+
+/**
 * Request to cancel a delegation token.
+ **/
 struct TCancelDelegationTokenReq {
-  // session handle
+  /** Session handle. */
   1: required TSessionHandle sessionHandle
 
-  // delegation token to cancel
+  /** Delegation token to cancel. */
   2: required string delegationToken
 }
 
+/**
 * Response to the request to cancel a delegation token.
+ **/
 struct TCancelDelegationTokenResp {
-  // status of the request
+  /** Status of the request. */
   1: required TStatus status
 }
 
-// RenewDelegationToken()
-// Renew the given delegation token
+
+/**
 * Request to renew a delegation token.
+ **/
 struct TRenewDelegationTokenReq {
-  // session handle
+  /** Session handle. */
   1: required TSessionHandle sessionHandle
 
-  // delegation token to renew
+  /** Delegation token to renew. */
  2: required string delegationToken
 }
 
+/**
 * Response to the request to renew a delegation token.
+ **/
 struct TRenewDelegationTokenResp {
-  // status of the request
+  /** Status of the request. */
   1: required TStatus status
 }
 
@@ -1144,6 +1240,12 @@ service TCLIService {
 
   TGetTypeInfoResp GetTypeInfo(1:TGetTypeInfoReq req);
 
+  /**
+   * This is not currently implemented in HiveServer2 as the concept of a catalog does not exist
+   * in Hive. If you want a list of all databases, use GetSchemas instead with an empty catalogName.
+   *
+   * This will always return an empty ResultSet.
+   */
   TGetCatalogsResp GetCatalogs(1:TGetCatalogsReq req);
 
   TGetSchemasResp GetSchemas(1:TGetSchemasReq req);
@@ -1157,7 +1259,7 @@ service TCLIService {
   TGetFunctionsResp GetFunctions(1:TGetFunctionsReq req);
 
   TGetOperationStatusResp GetOperationStatus(1:TGetOperationStatusReq req);
- 
+
   TCancelOperationResp CancelOperation(1:TCancelOperationReq req);
 
   TCloseOperationResp CloseOperation(1:TCloseOperationReq req);
@@ -1170,5 +1272,6 @@ service TCLIService {
   TCancelDelegationTokenResp CancelDelegationToken(1:TCancelDelegationTokenReq req);
 
+  /** Renew the given delegation token. */
   TRenewDelegationTokenResp RenewDelegationToken(1:TRenewDelegationTokenReq req);
 }
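
Taken together, these definitions describe the whole lifecycle a HiveServer2 client walks through: OpenSession, ExecuteStatement, FetchResults, then CloseOperation and CloseSession. The sketch below shows that flow against the Java stubs Thrift generates from this IDL (package org.apache.hive.service.cli.thrift, per the namespace declaration above). It is illustrative only: the host, port, and query are placeholder assumptions, and the bare TSocket is for demonstration; real HiveServer2 deployments usually require a SASL-wrapped transport on top of it.

import org.apache.hive.service.cli.thrift.*;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class TCLIServiceFlow {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint; assumes an unsecured HiveServer2 listener.
    TTransport transport = new TSocket("localhost", 10000);
    transport.open();
    TCLIService.Client client = new TCLIService.Client(new TBinaryProtocol(transport));

    // OpenSession negotiates the protocol version (client_protocol defaults
    // to V8) and returns the TSessionHandle used by every later call.
    TOpenSessionResp session = client.OpenSession(new TOpenSessionReq());

    // ExecuteStatement runs synchronously because runAsync defaults to false;
    // the returned TOperationHandle identifies the server-side operation.
    TExecuteStatementResp exec = client.ExecuteStatement(
        new TExecuteStatementReq(session.getSessionHandle(), "SELECT 1"));

    // FetchResults pulls up to maxRows rows; per the TFetchOrientation
    // comments above, only FETCH_NEXT and FETCH_FIRST are supported.
    TFetchResultsResp rows = client.FetchResults(new TFetchResultsReq(
        exec.getOperationHandle(), TFetchOrientation.FETCH_FIRST, 100));
    System.out.println(rows.getResults());

    // CloseOperation and CloseSession free the server-side resources.
    client.CloseOperation(new TCloseOperationReq(exec.getOperationHandle()));
    client.CloseSession(new TCloseSessionReq(session.getSessionHandle()));
    transport.close();
  }
}

When runAsync is set to true instead, GetOperationStatus slots in between ExecuteStatement and FetchResults to poll the TOperationState until FINISHED_STATE; that is the asynchronous execution path introduced by HIVE_CLI_SERVICE_PROTOCOL_V2.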