diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index 8bff2a9..2d97f00 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -385,6 +385,9 @@
METADATA_EXPORT_LOCATION("hive.metadata.export.location", ""),
MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true),
+ METASTORE_RETENTION_INTERVAL("hive.metastore.retention.interval.minutes", -1),
+ METASTORE_RETENTION_DATABASES("hive.metastore.retention.databases", ""),
+
// CLI
CLIIGNOREERRORS("hive.cli.errors.ignore", false),
CLIPRINTCURRENTDB("hive.cli.print.current.db", false),
diff --git conf/hive-default.xml.template conf/hive-default.xml.template
index 4944dfc..cf70e64 100644
--- conf/hive-default.xml.template
+++ conf/hive-default.xml.template
@@ -284,6 +284,18 @@
+ <property>
+ <name>hive.metastore.retention.interval.minutes</name>
+ <value>-1</value>
+ <description>Check interval for retention, in minutes. A value of zero or less disables the check.</description>
+ </property>
+
+ <property>
+ <name>hive.metastore.retention.databases</name>
+ <value></value>
+ <description>Comma-separated list of database names to which retention applies. An empty string means all databases.</description>
+ </property>
+
<name>hive.metastore.partition.name.whitelist.pattern</name>
<description>Partition names will be checked against this regex pattern and rejected if not matched.</description>
diff --git metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 911c997..6d220d2 100644
--- metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -19,12 +19,14 @@
package org.apache.hadoop.hive.metastore;
import static org.apache.commons.lang.StringUtils.join;
+import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
+import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
@@ -34,6 +36,7 @@
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
@@ -228,11 +231,89 @@ public void setConf(Configuration conf) {
} else {
LOG.info("Initialized ObjectStore");
}
+ startRetention(hiveConf);
} finally {
pmfPropLock.unlock();
}
}
+ private static Thread retention;
+
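+ // Starts the background retention checker at most once per process; later
+ // ObjectStore instances reuse the already-running daemon thread.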
+ private void startRetention(final Configuration conf) {
+ if (retention != null) {
+ return;
+ }
+ final int interval = HiveConf.getIntVar(conf, METASTORE_RETENTION_INTERVAL);
+ if (interval <= 0) {
+ return;
+ }
+ final String databaseNames = HiveConf.getVar(conf, METASTORE_RETENTION_DATABASES).trim();
+ final String[] databases = databaseNames.isEmpty() ? null : databaseNames.split("\\s*,\\s*");
+ retention = new Thread("Retention [" + interval + "]") {
+ public void run() {
+ int sleep = interval;
+ while (true) {
+ try {
+ Thread.sleep(TimeUnit.MINUTES.toMillis(sleep));
+ } catch (InterruptedException e) {
+ // ignore
+ }
+ try {
+ if (checkTTL(databases) > 0) {
+ sleep = interval;
+ continue;
+ }
+ } catch (MetaException e) {
+ LOG.warn("Failed to access metastore", e);
+ }
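+ // nothing expired, or the check failed: back off exponentially,
+ // capped at ten times the configured interval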
+ sleep = Math.min(interval * 10, sleep << 1);
+ }
+ }
+ };
+ retention.setDaemon(true);
+ retention.start();
+ }
+
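+ // Drops every expired table/partition found in this pass and returns the
+ // number of targets processed; zero makes the polling thread back off.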
+ private int checkTTL(String[] databases) throws MetaException {
+ Map<String[], Integer[]> targets = getRetentionTargets(databases);
+ for (Entry<String[], Integer[]> target : targets.entrySet()) {
+ String[] names = target.getKey();
+ Integer[] base = target.getValue();
+
+ String name;
+ if (names.length == 2) {
+ name = "table " + names[0] + "." + names[1];
+ } else {
+ name = "partition " + names[0] + "." + names[1] + "." + names[2];
+ }
+ String time = base[1] + " seconds";
+ if (base[1] > 60) {
+ if (base[1] > 60 * 60) {
+ if (base[1] > 60 * 60 * 24) {
+ time += " (about " + base[1] / 60 / 60 / 24 + "+ days)";
+ } else {
+ time += " (about " + base[1] / 60 / 60 + "+ hours)";
+ }
+ } else {
+ time += " (about " + base[1] / 60 + "+ minutes)";
+ }
+ }
+ LOG.warn("Dropping " + name + " by retention policy (Created: " +
+ new Date(base[0] * 1000L) + ", Retention: " + time + ")");
+ try {
+ if (names.length == 2) {
+ dropTable(names[0], names[1]);
+ } else {
+ List<String> values = Warehouse.getPartValuesFromPartName(names[2]);
+ dropPartition(names[0], names[1], values);
+ }
+ } catch (Exception e) {
+ LOG.warn("Failed to drop " + name + " (retention)", e);
+ }
+ }
+ return targets.size();
+ }
+
private ClassLoader classLoader;
{
classLoader = Thread.currentThread().getContextClassLoader();
@@ -900,6 +981,9 @@ private MTable getMTable(String db, String table) {
query.setUnique(true);
mtbl = (MTable) query.execute(table, db);
pm.retrieve(mtbl);
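+ // touch the last-access time on every lookup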
+ if (mtbl != null) {
+ mtbl.setLastAccessTime(nowInSeconds());
+ }
commited = commitTransaction();
} finally {
if (!commited) {
@@ -909,6 +993,78 @@ private MTable getMTable(String db, String table) {
return mtbl;
}
+ private int nowInSeconds() {
+ return (int) (System.currentTimeMillis() / 1000);
+ }
+
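+ // Collects expired targets in a single transaction: {db, table} or
+ // {db, table, partName} mapped to {createTime, retentionSeconds}.
+ // The array keys are only iterated, never looked up, so the identity
+ // hashing of array keys in a HashMap is not a problem here.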
+ private Map<String[], Integer[]> getRetentionTargets(String[] databases) throws MetaException {
+ boolean committed = false;
+ Map<String[], Integer[]> targets = new HashMap<String[], Integer[]>();
+ try {
+ openTransaction();
+ int current = nowInSeconds(); // todo: use db time
+ for (Object t : getTableRetentions(databases, current)) {
+ MTable mTable = (MTable)t;
+ MDatabase database = mTable.getDatabase();
+ String[] name = new String[] {database.getName(), mTable.getTableName()};
+ targets.put(name, new Integer[]{mTable.getCreateTime(), mTable.getRetention()});
+ }
+ for (Object p : getPartitionRetentions(databases, current)) {
+ MPartition mPart = (MPartition)p;
+ MTable mTable = mPart.getTable();
+ MDatabase database = mTable.getDatabase();
+ List<FieldSchema> partCols = convertToFieldSchemas(mTable.getPartitionKeys());
+ String partName = Warehouse.makePartName(partCols, mPart.getValues());
+ String[] name = new String[] {database.getName(), mTable.getTableName(), partName};
+ targets.put(name, new Integer[]{mPart.getCreateTime(), mTable.getRetention()});
+ }
+ committed = commitTransaction();
+ } finally {
+ if (!committed) {
+ rollbackTransaction();
+ }
+ }
+ return targets;
+ }
+
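+ // JDOQL filter: non-partitioned tables whose positive retention has
+ // already expired relative to 'current'.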
+ private Collection getTableRetentions(String[] databases, int current) {
+ Query query = pm.newQuery(MTable.class);
+ StringBuilder builder = new StringBuilder();
+ builder.append("partitionKeys == null && ");
+ builder.append("retention > 0 && createTime + retention < " + current);
+ if (databases != null && databases.length > 0) {
+ builder.append(" && database.name in (");
+ for (int i = 0; i < databases.length; i++) {
+ if (i > 0) {
+ builder.append(", ");
+ }
+ builder.append("\"" + databases[i] + "\"");
+ }
+ builder.append(")");
+ }
+ query.setFilter(builder.toString());
+ return (Collection) query.execute();
+ }
+
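+ // JDOQL filter: partitions whose owning table's retention has expired.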
+ private Collection getPartitionRetentions(String[] databases, int current) {
+ Query query = pm.newQuery(MPartition.class);
+ StringBuilder builder = new StringBuilder();
+ builder.append("table.partitionKeys != null && ");
+ builder.append("table.retention > 0 && createTime + table.retention < " + current);
+ if (databases != null && databases.length > 0) {
+ builder.append(" && table.database.name in (");
+ for (int i = 0; i < databases.length; i++) {
+ if (i > 0) {
+ builder.append(", ");
+ }
+ builder.append("\"" + databases[i] + "\"");
+ }
+ builder.append(")");
+ }
+ query.setFilter(builder.toString());
+ return (Collection) query.execute();
+ }
+
@Override
public List<Table> getTableObjectsByName(String db, List<String> tbl_names)
throws MetaException, UnknownDBException {
@@ -1368,6 +1524,9 @@ private MPartition getMPartition(String dbName, String tableName,
query.setUnique(true);
mpart = (MPartition) query.execute(tableName, dbName, name);
pm.retrieve(mpart);
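+ // touch the last-access time on every lookup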
+ if (mpart != null) {
+ mpart.setLastAccessTime(nowInSeconds());
+ }
commited = commitTransaction();
} finally {
if (!commited) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 4d8e10c..56cbd48 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -3722,6 +3722,10 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
while (keyItr.hasNext()) {
tbl.getTTable().getParameters().remove(keyItr.next());
}
+ } else if (alterTbl.getOp() == AlterTableTypes.SETRETENTION) {
+ tbl.getTTable().setRetention(alterTbl.getRetentionSeconds());
+ } else if (alterTbl.getOp() == AlterTableTypes.UNSETRETENTION) {
+ tbl.getTTable().setRetention(0);
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
if (part != null) {
part.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
@@ -4036,9 +4040,12 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc
* HiveConf of session
*/
private boolean updateModifiedParameters(Map<String, String> params, HiveConf conf) throws HiveException {
- String user = null;
- user = SessionState.getUserFromAuthenticator();
- params.put("last_modified_by", user);
+ String user = SessionState.getUserFromAuthenticator();
+ if (user != null) {
+ params.put("last_modified_by", user);
+ } else {
+ params.remove("last_modified_by");
+ }
params.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));
return true;
}
diff --git ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index 26836b6..05f7823 100644
--- ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -167,6 +167,7 @@ public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTy
case ADDFILEFORMAT:
case ADDSERDE:
case DROPPROPS:
+ case UNSETRETENTION:
case REPLACECOLS:
case ARCHIVE:
case UNARCHIVE:
@@ -184,6 +185,7 @@ public static WriteType determineAlterTableWriteType(AlterTableDesc.AlterTableTy
case ADDPARTITION:
case ADDSERDEPROPS:
+ case SETRETENTION:
case ADDPROPS: return WriteType.DDL_SHARED;
case COMPACT:
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index 640b6b3..cea42d6 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -36,6 +36,7 @@
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.antlr.runtime.tree.CommonTree;
import org.antlr.runtime.tree.Tree;
@@ -406,6 +407,12 @@ public void analyzeInternal(ASTNode ast) throws SemanticException {
case HiveParser.TOK_DROPTABLE_PROPERTIES:
analyzeAlterTableProps(ast, false, true);
break;
+ case HiveParser.TOK_ALTERTABLE_RETENTION:
+ analyzeAlterTableRetention(ast, false);
+ break;
+ case HiveParser.TOK_DROPTABLE_RETENTION:
+ analyzeAlterTableRetention(ast, true);
+ break;
case HiveParser.TOK_ALTERINDEX_REBUILD:
analyzeAlterIndexRebuild(ast);
break;
@@ -1277,6 +1284,44 @@ private void analyzeAlterTableProps(ASTNode ast, boolean expectView, boolean isU
alterTblDesc), conf));
}
+ private void analyzeAlterTableRetention(ASTNode ast, boolean isUnset) throws SemanticException {
+
+ String tableName = getUnescapedName((ASTNode) ast.getChild(0));
+ AlterTableDesc alterTblDesc = null;
+ if (isUnset) {
+ alterTblDesc = new AlterTableDesc(AlterTableTypes.UNSETRETENTION);
+ } else {
+ long time = Long.valueOf(ast.getChild(1).getText());
+ TimeUnit unit = toTimeUnit(ast.getChild(2).getText());
+
+ long seconds = unit.toSeconds(time);
+ if (seconds > Integer.MAX_VALUE) {
+ throw new SemanticException("Too big: " + seconds + " seconds");
+ }
+ alterTblDesc = new AlterTableDesc(AlterTableTypes.SETRETENTION);
+ alterTblDesc.setRetentionSeconds((int)seconds);
+ }
+ alterTblDesc.setOldName(tableName);
+
+ addInputsOutputsAlterTable(tableName, null, alterTblDesc);
+
+ rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
+ alterTblDesc), conf));
+ }
+
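+ // Maps the unit keyword of ALTER TABLE ... SET RETENTION onto a TimeUnit.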
+ private TimeUnit toTimeUnit(String unit) throws SemanticException {
+ if (unit.equalsIgnoreCase("days")) {
+ return TimeUnit.DAYS;
+ } else if (unit.equalsIgnoreCase("hours")) {
+ return TimeUnit.HOURS;
+ } else if (unit.equalsIgnoreCase("min") || unit.equalsIgnoreCase("minutes")) {
+ return TimeUnit.MINUTES;
+ } else if (unit.equalsIgnoreCase("sec") || unit.equalsIgnoreCase("seconds")) {
+ return TimeUnit.SECONDS;
+ }
+ throw new SemanticException("Invalid time unit: " + unit);
+ }
+
private void analyzeAlterTableSerdeProps(ASTNode ast, String tableName,
HashMap<String, String> partSpec)
throws SemanticException {
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
index 412a046..282e9de 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
@@ -195,6 +195,7 @@ KW_DBPROPERTIES: 'DBPROPERTIES';
KW_LIMIT: 'LIMIT';
KW_SET: 'SET';
KW_UNSET: 'UNSET';
+KW_RETENTION: 'RETENTION';
KW_TBLPROPERTIES: 'TBLPROPERTIES';
KW_IDXPROPERTIES: 'IDXPROPERTIES';
KW_VALUE_TYPE: '$VALUE$';
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index f934ac4..4651190 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -127,6 +127,7 @@ TOK_CREATEINDEX_INDEXTBLNAME;
TOK_DEFERRED_REBUILDINDEX;
TOK_DROPINDEX;
TOK_DROPTABLE_PROPERTIES;
+TOK_DROPTABLE_RETENTION;
TOK_LIKETABLE;
TOK_DESCTABLE;
TOK_DESCFUNCTION;
@@ -150,6 +151,7 @@ TOK_TABLE_PARTITION;
TOK_ALTERTABLE_FILEFORMAT;
TOK_ALTERTABLE_LOCATION;
TOK_ALTERTABLE_PROPERTIES;
+TOK_ALTERTABLE_RETENTION;
TOK_ALTERTABLE_CHANGECOL_AFTER_POSITION;
TOK_ALTERINDEX_REBUILD;
TOK_ALTERINDEX_PROPERTIES;
@@ -385,6 +387,7 @@ import java.util.HashMap;
xlateMap.put("KW_PARTITIONS", "PARTITIONS");
xlateMap.put("KW_TABLE", "TABLE");
xlateMap.put("KW_TABLES", "TABLES");
+ xlateMap.put("KW_RETENTION", "RETENTION");
xlateMap.put("KW_TBLPROPERTIES", "TBLPROPERTIES");
xlateMap.put("KW_SHOW", "SHOW");
xlateMap.put("KW_MSCK", "MSCK");
@@ -946,6 +949,7 @@ alterTableStatementSuffix
| alterStatementSuffixArchive
| alterStatementSuffixUnArchive
| alterStatementSuffixProperties
+ | alterStatementSuffixRetention
| alterTblPartitionStatement
| alterStatementSuffixSkewedby
| alterStatementSuffixExchangePartition
@@ -1093,6 +1097,15 @@ alterStatementSuffixProperties
-> ^(TOK_DROPTABLE_PROPERTIES $name tableProperties ifExists?)
;
+alterStatementSuffixRetention
+@init { pushMsg("alter retention statement", state); }
+@after { popMsg(state); }
+ : identifier KW_SET KW_RETENTION Number Identifier
+ -> ^(TOK_ALTERTABLE_RETENTION identifier Number Identifier)
+ | identifier KW_UNSET KW_RETENTION
+ -> ^(TOK_DROPTABLE_RETENTION identifier)
+ ;
+
alterViewSuffixProperties
@init { pushMsg("alter view properties statement", state); }
@after { popMsg(state); }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
index b6f3748..9b43d61 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
@@ -180,6 +180,8 @@ public static BaseSemanticAnalyzer get(HiveConf conf, ASTNode tree)
case HiveParser.TOK_ALTERTABLE_ADDPARTS:
case HiveParser.TOK_ALTERTABLE_PROPERTIES:
case HiveParser.TOK_DROPTABLE_PROPERTIES:
+ case HiveParser.TOK_ALTERTABLE_RETENTION:
+ case HiveParser.TOK_DROPTABLE_RETENTION:
case HiveParser.TOK_ALTERTABLE_SERIALIZER:
case HiveParser.TOK_ALTERTABLE_SERDEPROPERTIES:
case HiveParser.TOK_ALTERTABLE_PARTCOLTYPE:
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
index 20d863b..b097ad0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java
@@ -48,7 +48,8 @@
ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN, ADDPARTITION,
TOUCH, ARCHIVE, UNARCHIVE, ALTERPROTECTMODE, ALTERPARTITIONPROTECTMODE,
ALTERLOCATION, DROPPARTITION, RENAMEPARTITION, ADDSKEWEDBY, ALTERSKEWEDLOCATION,
- ALTERBUCKETNUM, ALTERPARTITION, COMPACT
+ ALTERBUCKETNUM, ALTERPARTITION, COMPACT,
+ SETRETENTION, UNSETRETENTION
}
public static enum ProtectModeType {
@@ -89,6 +90,8 @@
boolean isDropIfExists = false;
boolean isTurnOffSorting = false;
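+ // retention period in seconds; 0 means no retention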
+ int retentionSeconds;
+
public AlterTableDesc() {
}
@@ -702,5 +705,13 @@ public boolean getIsDropIfExists() {
return isDropIfExists;
}
+ public int getRetentionSeconds() {
+ return retentionSeconds;
+ }
+
+ public void setRetentionSeconds(int retentionSeconds) {
+ this.retentionSeconds = retentionSeconds;
+ }
+
}
diff --git ql/src/test/queries/clientpositive/alter_table_retention.q ql/src/test/queries/clientpositive/alter_table_retention.q
new file mode 100644
index 0000000..d521755
--- /dev/null
+++ ql/src/test/queries/clientpositive/alter_table_retention.q
@@ -0,0 +1,20 @@
+-- test table
+create table test_table (id int, query string, name string);
+describe formatted test_table;
+
+alter table test_table set retention 120 sec;
+describe formatted test_table;
+
+alter table test_table set retention 30 min;
+describe formatted test_table;
+
+alter table test_table set retention 12 hours;
+describe formatted test_table;
+
+alter table test_table set retention 7 days;
+describe formatted test_table;
+
+alter table test_table unset retention;
+describe formatted test_table;
+
+drop table test_table;
diff --git ql/src/test/results/clientpositive/alter_table_retention.q.out ql/src/test/results/clientpositive/alter_table_retention.q.out
new file mode 100644
index 0000000..464fc5c
--- /dev/null
+++ ql/src/test/results/clientpositive/alter_table_retention.q.out
@@ -0,0 +1,279 @@
+PREHOOK: query: -- test table
+create table test_table (id int, query string, name string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+POSTHOOK: query: -- test table
+create table test_table (id int, query string, name string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@test_table
+PREHOOK: query: describe formatted test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe formatted test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name data_type comment
+
+id int
+query string
+name string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table test_table set retention 120 sec
+PREHOOK: type: null
+PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
+POSTHOOK: query: alter table test_table set retention 120 sec
+POSTHOOK: type: null
+POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table
+PREHOOK: query: describe formatted test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe formatted test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name data_type comment
+
+id int
+query string
+name string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 120
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table test_table set retention 30 min
+PREHOOK: type: null
+PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
+POSTHOOK: query: alter table test_table set retention 30 min
+POSTHOOK: type: null
+POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table
+PREHOOK: query: describe formatted test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe formatted test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name data_type comment
+
+id int
+query string
+name string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 1800
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table test_table set retention 12 hours
+PREHOOK: type: null
+PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
+POSTHOOK: query: alter table test_table set retention 12 hours
+POSTHOOK: type: null
+POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table
+PREHOOK: query: describe formatted test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe formatted test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name data_type comment
+
+id int
+query string
+name string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 43200
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table test_table set retention 7 days
+PREHOOK: type: null
+PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
+POSTHOOK: query: alter table test_table set retention 7 days
+POSTHOOK: type: null
+POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table
+PREHOOK: query: describe formatted test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe formatted test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name data_type comment
+
+id int
+query string
+name string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 604800
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: alter table test_table unset retention
+PREHOOK: type: null
+PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
+POSTHOOK: query: alter table test_table unset retention
+POSTHOOK: type: null
+POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table
+PREHOOK: query: describe formatted test_table
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@test_table
+POSTHOOK: query: describe formatted test_table
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@test_table
+# col_name data_type comment
+
+id int
+query string
+name string
+
+# Detailed Table Information
+Database: default
+#### A masked pattern was here ####
+Protect Mode: None
+Retention: 0
+#### A masked pattern was here ####
+Table Type: MANAGED_TABLE
+Table Parameters:
+ COLUMN_STATS_ACCURATE false
+#### A masked pattern was here ####
+ numFiles 0
+ numRows -1
+ rawDataSize -1
+ totalSize 0
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+InputFormat: org.apache.hadoop.mapred.TextInputFormat
+OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed: No
+Num Buckets: -1
+Bucket Columns: []
+Sort Columns: []
+Storage Desc Params:
+ serialization.format 1
+PREHOOK: query: drop table test_table
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@test_table
+PREHOOK: Output: default@test_table
+POSTHOOK: query: drop table test_table
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@test_table
+POSTHOOK: Output: default@test_table