diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
index 9ca8b0007e..a06677adf1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
@@ -98,7 +98,7 @@
   // Keeps track of scratch directories created for different scheme/authority
   private final Map<String, Path> fsScratchDirs = new HashMap<String, Path>();
 
-  private final Configuration conf;
+  private Configuration conf;
   protected int pathid = 10000;
   protected ExplainConfiguration explainConfig = null;
   protected String cboInfo;
@@ -923,6 +923,10 @@ public ContentSummary getCS(String path) {
     return pathToCS;
   }
 
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
   public Configuration getConf() {
     return conf;
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
index f761fffbf1..2e6e0479b0 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
@@ -156,7 +156,7 @@
   private int maxRows = 100;
   ByteStream.Output bos = new ByteStream.Output();
 
-  private final HiveConf conf;
+  private HiveConf conf;
   private final boolean isParallelEnabled;
   private DataInput resStream;
   private Context ctx;
@@ -650,7 +650,8 @@ public void run() {
       if (conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED)) {
         cacheUsage = sem.getCacheUsage();
       }
-
+      // The conf may have been modified during semantic analysis
+      conf = (HiveConf) ctx.getConf();
       // validate the plan
       sem.validate();
       perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
index f329570cc9..5ed194dfba 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
@@ -161,9 +161,7 @@ public void initialize(QueryState queryState, QueryPlan queryPlan, DriverContext
     this.queryPlan = queryPlan;
     setInitialized();
     this.queryState = queryState;
-    if (null == this.conf) {
-      this.conf = queryState.getConf();
-    }
+    this.conf = queryState.getConf();
     this.driverContext = driverContext;
     console = new LogHelper(LOG);
   }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index 009a890888..49c355be01 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -2299,7 +2299,6 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType
     assert tbl.getPath() != null : "null==getPath() for " + tbl.getTableName();
     boolean isMmTable = AcidUtils.isInsertOnlyTable(tbl);
     boolean isFullAcidTable = AcidUtils.isFullAcidTable(tbl);
-    HiveConf sessionConf = SessionState.getSessionConf();
     if (conf.getBoolVar(ConfVars.FIRE_EVENTS_FOR_DML) && !tbl.isTemporary()) {
       newFiles = Collections.synchronizedList(new ArrayList<>());
     }
@@ -2341,11 +2340,11 @@ else if(!isAcidIUDoperation && isFullAcidTable) {
       boolean isAutopurge = "true".equalsIgnoreCase(tbl.getProperty("auto.purge"));
       // TODO: this should never run for MM tables anymore. Remove the flag, and maybe the filter?
       replaceFiles(tblPath, loadPath, destPath, tblPath,
-          sessionConf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
+          conf, isSrcLocal, isAutopurge, newFiles, filter, isMmTable?true:false, !tbl.isTemporary());
     } else {
       try {
-        FileSystem fs = tbl.getDataLocation().getFileSystem(sessionConf);
-        copyFiles(sessionConf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
+        FileSystem fs = tbl.getDataLocation().getFileSystem(conf);
+        copyFiles(conf, loadPath, destPath, fs, isSrcLocal, isAcidIUDoperation,
             loadFileType == LoadFileType.OVERWRITE_EXISTING, newFiles, tbl.getNumBuckets() > 0 ?
                 true : false, isFullAcidTable);
       } catch (IOException e) {
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
index bdecbaf144..6981c1db42 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java
@@ -235,6 +235,8 @@ private void initReplLoad(ASTNode ast) throws SemanticException {
       } catch (HiveException e) {
         throw new SemanticException(e);
       }
+      // Set the updated config in the Context (which gets passed to the Tasks)
+      ctx.setConf(conf);
     }
     break;
   default: