diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index ce6ad6b..182902e 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -18,7 +18,26 @@ package org.apache.hadoop.hive.conf; -import com.google.common.base.Joiner; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; +import java.net.URL; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import javax.security.auth.login.LoginException; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; @@ -38,26 +57,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.security.auth.login.LoginException; - -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintStream; -import java.net.URL; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Properties; -import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; +import com.google.common.base.Joiner; /** * Hive Configuration. @@ -252,9 +252,6 @@ public void setSparkConfigUpdated(boolean isSparkConfigUpdated) { // QL execution stuff SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""), PLAN("hive.exec.plan", "", ""), - PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo", - "Query plan format serialization between client and task nodes. \n" + - "Two supported values are : kryo and javaXML. Kryo is default."), STAGINGDIR("hive.exec.stagingdir", ".hive-staging", "Directory name that will be created inside table locations in order to support HDFS encryption. " + "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. 
" + diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index 5b2c8c2..f805087 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -56,11 +56,8 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.hive.ql.exec.SerializationUtilities; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -109,6 +106,8 @@ import org.apache.zookeeper.Watcher; import org.apache.zookeeper.ZooKeeper; import org.junit.Assert; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableList; @@ -1328,49 +1327,6 @@ public int checkParseResults(String tname, ASTNode tree) throws Exception { } } - private final Pattern[] xmlPlanMask = toPattern(new String[] { - "", - ".*/tmp/.*", - "file:.*", - "pfile:.*", - "[0-9]{10}", - "/.*/warehouse/.*" - }); - - public int checkPlan(String tname, List> tasks) throws Exception { - - if (tasks == null) { - throw new Exception("Plan is null"); - } - File planDir = new File(outDir, "plan"); - String planFile = outPath(planDir.toString(), tname + ".xml"); - - File outf = null; - outf = new File(logDir); - outf = new File(outf, tname.concat(".xml")); - - FileOutputStream ofs = new FileOutputStream(outf); - try { - conf.set(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "javaXML"); - for (Task plan : tasks) { - SerializationUtilities.serializePlan(plan, ofs, conf); - } - ofs.close(); - fixXml4JDK7(outf.getPath()); - maskPatterns(xmlPlanMask, outf.getPath()); - - int exitVal = executeDiffCommand(outf.getPath(), planFile, true, false); - - if (exitVal != 0 && overWrite) { - exitVal = overwriteResults(outf.getPath(), planFile); - } - return exitVal; - } finally { - conf.set(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "kryo"); - IOUtils.closeQuietly(ofs); - } - } - /** * Given the current configurations (e.g., hadoop version and execution mode), return * the correct file name to compare with the current test run output. @@ -1406,103 +1362,6 @@ public String outPath(String outDir, String testName) { return ret; } - /** - * Fix the XML generated by JDK7 which is slightly different from what's generated by JDK6, - * causing 40+ test failures. There are mainly two problems: - * - * 1. object element's properties, id and class, are in reverse order, i.e. - * - * which needs to be fixed to - * - * 2. JDK introduces Enum as class, i.e. - * - * org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator$Mode - * which needs to be fixed to - * - * - * Though not elegant, this allows these test cases to pass until we have a better serialization mechanism. - * - * Did I mention this is test code? 
- * - * @param fname the name of the file to fix - * @throws Exception in case of IO error - */ - private static void fixXml4JDK7(String fname) throws Exception { - String version = System.getProperty("java.version"); - if (!version.startsWith("1.7")) { - return; - } - - BufferedReader in = new BufferedReader(new FileReader(fname)); - BufferedWriter out = new BufferedWriter(new FileWriter(fname + ".orig")); - String line = null; - while (null != (line = in.readLine())) { - out.write(line); - out.write('\n'); - } - in.close(); - out.close(); - - in = new BufferedReader(new FileReader(fname + ".orig")); - out = new BufferedWriter(new FileWriter(fname)); - - while (null != (line = in.readLine())) { - if (line.indexOf("")); - int start = line.indexOf("<" + name + ">") + name.length() + 2; - int end = line.indexOf(""); - return line.substring(start, end); - } - private Pattern[] toPattern(String[] patternStrs) { Pattern[] patterns = new Pattern[patternStrs.length]; for (int i = 0; i < patternStrs.length; i++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java index 721fbaa..7e5f950 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFUtils.java @@ -18,34 +18,11 @@ package org.apache.hadoop.hive.ql.exec; -import java.beans.BeanInfo; -import java.beans.Encoder; -import java.beans.ExceptionListener; -import java.beans.Expression; -import java.beans.IntrospectionException; -import java.beans.Introspector; -import java.beans.PersistenceDelegate; -import java.beans.PropertyDescriptor; -import java.beans.Statement; -import java.beans.XMLDecoder; -import java.beans.XMLEncoder; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Stack; -import org.antlr.runtime.CommonToken; -import org.antlr.runtime.tree.BaseTree; -import org.antlr.runtime.tree.CommonTree; -import org.apache.hadoop.hive.ql.exec.SerializationUtilities.EnumDelegate; -import org.apache.hadoop.hive.ql.parse.ASTNode; -import org.apache.hadoop.hive.ql.parse.WindowingSpec.Direction; -import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; - public class PTFUtils { public static String toString(List col) @@ -127,162 +104,5 @@ public void remove() public static abstract class Predicate { public abstract boolean apply(T obj); - }; - - - - /* - * serialization functions - */ - public static void serialize(OutputStream out, Object o) - { - XMLEncoder e = new XMLEncoder(out); - e.setExceptionListener(new EL()); - PTFUtils.addPersistenceDelegates(e); - e.writeObject(o); - e.close(); - } - - public static Object deserialize(InputStream in1) - { - XMLDecoder d = null; - try - { - d = new XMLDecoder(in1, null, null); - return d.readObject(); - } - finally - { - if (null != d) - { - d.close(); - } - } - } - - public static void addPersistenceDelegates(XMLEncoder e) - { - addAntlrPersistenceDelegates(e); - addHivePersistenceDelegates(e); - addEnumDelegates(e); - } - - public static void addEnumDelegates(XMLEncoder e) - { - e.setPersistenceDelegate(Direction.class, new EnumDelegate()); - } - - public static void addAntlrPersistenceDelegates(XMLEncoder e) - { - e.setPersistenceDelegate(ASTNode.class, new PersistenceDelegate() - { - - @Override - protected Expression instantiate(Object oldInstance, Encoder out) - { - 
return new Expression(oldInstance, oldInstance.getClass(), - "new", new Object[] - { ((ASTNode) oldInstance).getToken() }); - } - }); - e.setPersistenceDelegate(CommonTree.class, new PersistenceDelegate() - { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) - { - return new Expression(oldInstance, oldInstance.getClass(), - "new", new Object[] - { ((CommonTree) oldInstance).getToken() }); - } - }); - e.setPersistenceDelegate(BaseTree.class, new PersistenceDelegate() - { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) - { - return new Expression(oldInstance, oldInstance.getClass(), - "new", new Object[] - {}); - } - - @Override - @SuppressWarnings("rawtypes") - protected void initialize(Class type, Object oldInstance, - Object newInstance, Encoder out) - { - super.initialize(type, oldInstance, newInstance, out); - - BaseTree t = (BaseTree) oldInstance; - - for (int i = 0; i < t.getChildCount(); i++) - { - out.writeStatement(new Statement(oldInstance, "addChild", - new Object[] - { t.getChild(i) })); - } - } - }); - e.setPersistenceDelegate(CommonToken.class, new PersistenceDelegate() - { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) - { - return new Expression(oldInstance, oldInstance.getClass(), - "new", new Object[] - { ((CommonToken) oldInstance).getType(), - ((CommonToken) oldInstance).getText() }); - } - }); - } - - public static void addHivePersistenceDelegates(XMLEncoder e) - { - e.setPersistenceDelegate(PrimitiveTypeInfo.class, - new PersistenceDelegate() - { - @Override - protected Expression instantiate(Object oldInstance, - Encoder out) - { - return new Expression(oldInstance, - TypeInfoFactory.class, "getPrimitiveTypeInfo", - new Object[] - { ((PrimitiveTypeInfo) oldInstance) - .getTypeName() }); - } - }); - } - - static class EL implements ExceptionListener - { - public void exceptionThrown(Exception e) - { - e.printStackTrace(); - throw new RuntimeException("Cannot serialize the query plan", e); - } - } - - public static void makeTransient(Class beanClass, String... pdNames) { - try { - BeanInfo info = Introspector.getBeanInfo(beanClass); - PropertyDescriptor[] descs = info.getPropertyDescriptors(); - if (descs == null) { - throw new RuntimeException("Cannot access property descriptor for class " + beanClass); - } - Map mapping = new HashMap(); - for (PropertyDescriptor desc : descs) { - mapping.put(desc.getName(), desc); - } - for (String pdName : pdNames) { - PropertyDescriptor desc = mapping.get(pdName); - if (desc == null) { - throw new RuntimeException("Property " + pdName + " does not exist in " + beanClass); - } - desc.setValue("transient", Boolean.TRUE); - } - } - catch (IntrospectionException ie) { - throw new RuntimeException(ie); - } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java index ef5ee95..4b65952 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java @@ -64,10 +64,6 @@ public class ReduceSinkOperator extends TerminalOperator implements Serializable, TopNHash.BinaryCollector { - static { - PTFUtils.makeTransient(ReduceSinkOperator.class, "inputAliases", "valueIndex"); - } - /** * Counters. 
*/ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java index d5e946e..803f492 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SerializationUtilities.java @@ -17,14 +17,6 @@ */ package org.apache.hadoop.hive.ql.exec; -import java.beans.DefaultPersistenceDelegate; -import java.beans.Encoder; -import java.beans.ExceptionListener; -import java.beans.Expression; -import java.beans.PersistenceDelegate; -import java.beans.Statement; -import java.beans.XMLDecoder; -import java.beans.XMLEncoder; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.InputStream; @@ -35,32 +27,20 @@ import java.lang.reflect.Field; import java.net.URI; import java.sql.Timestamp; -import java.util.ArrayList; import java.util.Arrays; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; -import java.util.Map; -import java.util.Set; -import org.antlr.runtime.CommonToken; import org.apache.commons.codec.binary.Base64; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import org.apache.hadoop.hive.ql.log.PerfLogger; import org.apache.hadoop.hive.ql.plan.AbstractOperatorDesc; import org.apache.hadoop.hive.ql.plan.BaseWork; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.hive.ql.plan.MapredWork; -import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceWork; import org.apache.hadoop.hive.ql.plan.SparkEdgeProperty; import org.apache.hadoop.hive.ql.plan.SparkWork; @@ -289,46 +269,37 @@ public void write(final Kryo kryo, final Output output, final List obj) { } } - /** * Serializes the plan. * * @param plan The plan, such as QueryPlan, MapredWork, etc. * @param out The stream to write to. - * @param conf to pick which serialization format is desired. 
*/ - public static void serializePlan(Object plan, OutputStream out, Configuration conf) { - serializePlan(plan, out, conf, false); + public static void serializePlan(Object plan, OutputStream out) { + serializePlan(plan, out, false); } - public static void serializePlan(Kryo kryo, Object plan, OutputStream out, Configuration conf) { - serializePlan(kryo, plan, out, conf, false); + public static void serializePlan(Kryo kryo, Object plan, OutputStream out) { + serializePlan(kryo, plan, out, false); } - private static void serializePlan(Object plan, OutputStream out, Configuration conf, - boolean cloningPlan) { + private static void serializePlan(Object plan, OutputStream out, boolean cloningPlan) { Kryo kryo = borrowKryo(); try { - serializePlan(kryo, plan, out, conf, cloningPlan); + serializePlan(kryo, plan, out, cloningPlan); } finally { releaseKryo(kryo); } } - private static void serializePlan(Kryo kryo, Object plan, OutputStream out, Configuration conf, - boolean cloningPlan) { + private static void serializePlan(Kryo kryo, Object plan, OutputStream out, boolean cloningPlan) { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SERIALIZE_PLAN); - String serializationType = conf.get(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "kryo"); - LOG.info("Serializing " + plan.getClass().getSimpleName() + " via " + serializationType); - if ("javaXML".equalsIgnoreCase(serializationType)) { - serializeObjectByJavaXML(plan, out); + LOG.info("Serializing " + plan.getClass().getSimpleName() + " using kryo"); + if (cloningPlan) { + serializeObjectByKryo(kryo, plan, out); } else { - if (cloningPlan) { - serializeObjectByKryo(kryo, plan, out); - } else { - serializeObjectByKryo(kryo, plan, out); - } + serializeObjectByKryo(kryo, plan, out); } perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SERIALIZE_PLAN); } @@ -338,24 +309,21 @@ private static void serializePlan(Kryo kryo, Object plan, OutputStream out, Conf * * @param in The stream to read from. * @param planClass class of plan - * @param conf configuration * @return The plan, such as QueryPlan, MapredWork, etc. 
*/ - public static T deserializePlan(InputStream in, Class planClass, Configuration conf) { - return deserializePlan(in, planClass, conf, false); + public static T deserializePlan(InputStream in, Class planClass) { + return deserializePlan(in, planClass, false); } - public static T deserializePlan(Kryo kryo, InputStream in, Class planClass, - Configuration conf) { - return deserializePlan(kryo, in, planClass, conf, false); + public static T deserializePlan(Kryo kryo, InputStream in, Class planClass) { + return deserializePlan(kryo, in, planClass, false); } - private static T deserializePlan(InputStream in, Class planClass, Configuration conf, - boolean cloningPlan) { + private static T deserializePlan(InputStream in, Class planClass, boolean cloningPlan) { Kryo kryo = borrowKryo(); T result = null; try { - result = deserializePlan(kryo, in, planClass, conf, cloningPlan); + result = deserializePlan(kryo, in, planClass, cloningPlan); } finally { releaseKryo(kryo); } @@ -363,20 +331,15 @@ private static void serializePlan(Kryo kryo, Object plan, OutputStream out, Conf } private static T deserializePlan(Kryo kryo, InputStream in, Class planClass, - Configuration conf, boolean cloningPlan) { + boolean cloningPlan) { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN); T plan; - String serializationType = conf.get(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "kryo"); - LOG.info("Deserializing " + planClass.getSimpleName() + " via " + serializationType); - if ("javaXML".equalsIgnoreCase(serializationType)) { - plan = deserializeObjectByJavaXML(in); + LOG.info("Deserializing " + planClass.getSimpleName() + " using kryo"); + if (cloningPlan) { + plan = deserializeObjectByKryo(kryo, in, planClass); } else { - if (cloningPlan) { - plan = deserializeObjectByKryo(kryo, in, planClass); - } else { - plan = deserializeObjectByKryo(kryo, in, planClass); - } + plan = deserializeObjectByKryo(kryo, in, planClass); } perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN); return plan; @@ -392,10 +355,9 @@ public static MapredWork clonePlan(MapredWork plan) { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN); ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); - Configuration conf = new HiveConf(); - serializePlan(plan, baos, conf, true); + serializePlan(plan, baos, true); MapredWork newPlan = deserializePlan(new ByteArrayInputStream(baos.toByteArray()), - MapredWork.class, conf, true); + MapredWork.class, true); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN); return newPlan; } @@ -409,201 +371,14 @@ public static BaseWork cloneBaseWork(BaseWork plan) { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.CLONE_PLAN); ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); - Configuration conf = new HiveConf(); - serializePlan(plan, baos, conf, true); + serializePlan(plan, baos, true); BaseWork newPlan = deserializePlan(new ByteArrayInputStream(baos.toByteArray()), - plan.getClass(), conf, true); + plan.getClass(), true); perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.CLONE_PLAN); return newPlan; } /** - * Serialize the object. This helper function mainly makes sure that enums, - * counters, etc are handled properly. 
- */ - private static void serializeObjectByJavaXML(Object plan, OutputStream out) { - XMLEncoder e = new XMLEncoder(out); - e.setExceptionListener(new ExceptionListener() { - @Override - public void exceptionThrown(Exception e) { - LOG.warn(org.apache.hadoop.util.StringUtils.stringifyException(e)); - throw new RuntimeException("Cannot serialize object", e); - } - }); - // workaround for java 1.5 - e.setPersistenceDelegate(PlanUtils.ExpressionTypes.class, new EnumDelegate()); - e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate()); - e.setPersistenceDelegate(java.sql.Date.class, new DatePersistenceDelegate()); - e.setPersistenceDelegate(Timestamp.class, new TimestampPersistenceDelegate()); - - e.setPersistenceDelegate(org.datanucleus.store.types.backed.Map.class, new MapDelegate()); - e.setPersistenceDelegate(org.datanucleus.store.types.backed.List.class, new ListDelegate()); - e.setPersistenceDelegate(CommonToken.class, new CommonTokenDelegate()); - e.setPersistenceDelegate(Path.class, new PathDelegate()); - - e.writeObject(plan); - e.close(); - } - - - /** - * Java 1.5 workaround. From http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=5015403 - */ - public static class EnumDelegate extends DefaultPersistenceDelegate { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) { - return new Expression(Enum.class, "valueOf", new Object[] {oldInstance.getClass(), - ((Enum) oldInstance).name()}); - } - - @Override - protected boolean mutatesTo(Object oldInstance, Object newInstance) { - return oldInstance == newInstance; - } - } - - public static class MapDelegate extends DefaultPersistenceDelegate { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) { - Map oldMap = (Map) oldInstance; - HashMap newMap = new HashMap(oldMap); - return new Expression(newMap, HashMap.class, "new", new Object[] {}); - } - - @Override - protected boolean mutatesTo(Object oldInstance, Object newInstance) { - return false; - } - - @Override - protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { - java.util.Collection oldO = (java.util.Collection) oldInstance; - java.util.Collection newO = (java.util.Collection) newInstance; - - if (newO.size() != 0) { - out.writeStatement(new Statement(oldInstance, "clear", new Object[] {})); - } - for (Iterator i = oldO.iterator(); i.hasNext();) { - out.writeStatement(new Statement(oldInstance, "add", new Object[] {i.next()})); - } - } - } - - public static class SetDelegate extends DefaultPersistenceDelegate { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) { - Set oldSet = (Set) oldInstance; - HashSet newSet = new HashSet(oldSet); - return new Expression(newSet, HashSet.class, "new", new Object[] {}); - } - - @Override - protected boolean mutatesTo(Object oldInstance, Object newInstance) { - return false; - } - - @Override - protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { - java.util.Collection oldO = (java.util.Collection) oldInstance; - java.util.Collection newO = (java.util.Collection) newInstance; - - if (newO.size() != 0) { - out.writeStatement(new Statement(oldInstance, "clear", new Object[] {})); - } - for (Iterator i = oldO.iterator(); i.hasNext();) { - out.writeStatement(new Statement(oldInstance, "add", new Object[] {i.next()})); - } - } - - } - - public static class ListDelegate extends DefaultPersistenceDelegate { - @Override - protected Expression instantiate(Object oldInstance, 
Encoder out) { - List oldList = (List) oldInstance; - ArrayList newList = new ArrayList(oldList); - return new Expression(newList, ArrayList.class, "new", new Object[] {}); - } - - @Override - protected boolean mutatesTo(Object oldInstance, Object newInstance) { - return false; - } - - @Override - protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { - java.util.Collection oldO = (java.util.Collection) oldInstance; - java.util.Collection newO = (java.util.Collection) newInstance; - - if (newO.size() != 0) { - out.writeStatement(new Statement(oldInstance, "clear", new Object[] {})); - } - for (Iterator i = oldO.iterator(); i.hasNext();) { - out.writeStatement(new Statement(oldInstance, "add", new Object[] {i.next()})); - } - } - - } - - /** - * DatePersistenceDelegate. Needed to serialize java.util.Date - * since it is not serialization friendly. - * Also works for java.sql.Date since it derives from java.util.Date. - */ - public static class DatePersistenceDelegate extends PersistenceDelegate { - - @Override - protected Expression instantiate(Object oldInstance, Encoder out) { - Date dateVal = (Date)oldInstance; - Object[] args = { dateVal.getTime() }; - return new Expression(dateVal, dateVal.getClass(), "new", args); - } - - @Override - protected boolean mutatesTo(Object oldInstance, Object newInstance) { - if (oldInstance == null || newInstance == null) { - return false; - } - return oldInstance.getClass() == newInstance.getClass(); - } - } - - /** - * TimestampPersistenceDelegate. Needed to serialize java.sql.Timestamp since - * it is not serialization friendly. - */ - public static class TimestampPersistenceDelegate extends DatePersistenceDelegate { - @Override - protected void initialize(Class type, Object oldInstance, Object newInstance, Encoder out) { - Timestamp ts = (Timestamp)oldInstance; - Object[] args = { ts.getNanos() }; - Statement stmt = new Statement(oldInstance, "setNanos", args); - out.writeStatement(stmt); - } - } - - /** - * Need to serialize org.antlr.runtime.CommonToken - */ - public static class CommonTokenDelegate extends PersistenceDelegate { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) { - CommonToken ct = (CommonToken)oldInstance; - Object[] args = {ct.getType(), ct.getText()}; - return new Expression(ct, ct.getClass(), "new", args); - } - } - - public static class PathDelegate extends PersistenceDelegate { - @Override - protected Expression instantiate(Object oldInstance, Encoder out) { - Path p = (Path)oldInstance; - Object[] args = {p.toString()}; - return new Expression(p, p.getClass(), "new", args); - } - } - - /** * @param plan Usually of type MapredWork, MapredLocalWork etc. * @param out stream in which serialized plan is written into */ @@ -614,23 +389,6 @@ private static void serializeObjectByKryo(Kryo kryo, Object plan, OutputStream o output.close(); } - /** - * De-serialize an object. This helper function mainly makes sure that enums, - * counters, etc are handled properly. 
- */ - @SuppressWarnings("unchecked") - private static T deserializeObjectByJavaXML(InputStream in) { - XMLDecoder d = null; - try { - d = new XMLDecoder(in, null, null); - return (T) d.readObject(); - } finally { - if (null != d) { - d.close(); - } - } - } - private static T deserializeObjectByKryo(Kryo kryo, InputStream in, Class clazz ) { Input inp = new Input(in); kryo.setClassLoader(Utilities.getSessionSpecifiedClassLoader()); @@ -639,13 +397,13 @@ private static void serializeObjectByKryo(Kryo kryo, Object plan, OutputStream o return t; } - public static List> cloneOperatorTree(Configuration conf, List> roots) { + public static List> cloneOperatorTree(List> roots) { ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); - serializePlan(roots, baos, conf, true); + serializePlan(roots, baos, true); @SuppressWarnings("unchecked") List> result = deserializePlan(new ByteArrayInputStream(baos.toByteArray()), - roots.getClass(), conf, true); + roots.getClass(), true); return result; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java index c8e7549..0eab63e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java @@ -48,10 +48,6 @@ public abstract class Task implements Serializable, Node { - static { - PTFUtils.makeTransient(Task.class, "fetchSource"); - } - private static final long serialVersionUID = 1L; public transient HashMap taskCounters; public transient TaskHandle taskHandle; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index c01994f..5135eb9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -404,29 +404,29 @@ private static BaseWork getBaseWork(Configuration conf, String name) { if(MAP_PLAN_NAME.equals(name)){ if (ExecMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))){ - gWork = SerializationUtilities.deserializePlan(kryo, in, MapWork.class, conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, MapWork.class); } else if(MergeFileMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))) { - gWork = SerializationUtilities.deserializePlan(kryo, in, MergeFileWork.class, conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, MergeFileWork.class); } else if(ColumnTruncateMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))) { - gWork = SerializationUtilities.deserializePlan(kryo, in, ColumnTruncateWork.class, conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, ColumnTruncateWork.class); } else if(PartialScanMapper.class.getName().equals(conf.get(MAPRED_MAPPER_CLASS))) { - gWork = SerializationUtilities.deserializePlan(kryo, in, PartialScanWork.class,conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, PartialScanWork.class); } else { throw new RuntimeException("unable to determine work from configuration ." + MAPRED_MAPPER_CLASS + " was "+ conf.get(MAPRED_MAPPER_CLASS)) ; } } else if (REDUCE_PLAN_NAME.equals(name)) { if(ExecReducer.class.getName().equals(conf.get(MAPRED_REDUCER_CLASS))) { - gWork = SerializationUtilities.deserializePlan(kryo, in, ReduceWork.class, conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, ReduceWork.class); } else { throw new RuntimeException("unable to determine work from configuration ." 
+ MAPRED_REDUCER_CLASS +" was "+ conf.get(MAPRED_REDUCER_CLASS)) ; } } else if (name.contains(MERGE_PLAN_NAME)) { if (name.startsWith(MAPNAME)) { - gWork = SerializationUtilities.deserializePlan(kryo, in, MapWork.class, conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, MapWork.class); } else if (name.startsWith(REDUCENAME)) { - gWork = SerializationUtilities.deserializePlan(kryo, in, ReduceWork.class, conf); + gWork = SerializationUtilities.deserializePlan(kryo, in, ReduceWork.class); } else { throw new RuntimeException("Unknown work type: " + name); } @@ -525,7 +525,7 @@ private static Path setBaseWork(Configuration conf, BaseWork w, Path hiveScratch ByteArrayOutputStream byteOut = new ByteArrayOutputStream(); try { out = new DeflaterOutputStream(byteOut, new Deflater(Deflater.BEST_SPEED)); - SerializationUtilities.serializePlan(kryo, w, out, conf); + SerializationUtilities.serializePlan(kryo, w, out); out.close(); out = null; } finally { @@ -539,7 +539,7 @@ private static Path setBaseWork(Configuration conf, BaseWork w, Path hiveScratch FileSystem fs = planPath.getFileSystem(conf); try { out = fs.create(planPath); - SerializationUtilities.serializePlan(kryo, w, out, conf); + SerializationUtilities.serializePlan(kryo, w, out); out.close(); out = null; } finally { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index 2129bda..971dac9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -741,13 +741,12 @@ public static void main(String[] args) throws IOException, HiveException { int ret; if (localtask) { memoryMXBean = ManagementFactory.getMemoryMXBean(); - MapredLocalWork plan = SerializationUtilities.deserializePlan(pathData, MapredLocalWork.class, - conf); + MapredLocalWork plan = SerializationUtilities.deserializePlan(pathData, MapredLocalWork.class); MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent); ret = ed.executeInProcess(new DriverContext()); } else { - MapredWork plan = SerializationUtilities.deserializePlan(pathData, MapredWork.class, conf); + MapredWork plan = SerializationUtilities.deserializePlan(pathData, MapredWork.class); ExecDriver ed = new ExecDriver(plan, conf, isSilent); ret = ed.execute(new DriverContext()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java index cb70ac8..7ec5ed1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java @@ -178,7 +178,7 @@ public int execute(DriverContext driverContext) { OutputStream out = null; try { out = FileSystem.getLocal(conf).create(planPath); - SerializationUtilities.serializePlan(plan, out, conf); + SerializationUtilities.serializePlan(plan, out); out.close(); out = null; } finally { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index cb7dfa1..f191572 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -160,7 +160,7 @@ public int executeInChildVM(DriverContext driverContext) { OutputStream out = null; try { out = FileSystem.getLocal(conf).create(planPath); - SerializationUtilities.serializePlan(plan, out, conf); + 
SerializationUtilities.serializePlan(plan, out); out.close(); out = null; } finally { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java index 41d3522..4805162 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenSparkSkewJoinProcessor.java @@ -256,7 +256,7 @@ public static void processSkewJoin(JoinOperator joinOp, Task> reducerList = new ArrayList>(); reducerList.add(reduceWork.getReducer()); Operator reducer = SerializationUtilities.cloneOperatorTree( - parseCtx.getConf(), reducerList).get(0); + reducerList).get(0); Preconditions.checkArgument(reducer instanceof JoinOperator, "Reducer should be join operator, but actually is " + reducer.getName()); JoinOperator cloneJoinOp = (JoinOperator) reducer; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index f656998..70912e0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -28,7 +28,6 @@ import java.util.List; import java.util.Set; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.AppMasterEventOperator; @@ -204,8 +203,7 @@ protected void setupMapWork(MapWork mapWork, GenTezProcContext context, } // removes any union operator and clones the plan - public static void removeUnionOperators(Configuration conf, GenTezProcContext context, - BaseWork work) + public static void removeUnionOperators(GenTezProcContext context, BaseWork work) throws SemanticException { List> roots = new ArrayList>(); @@ -216,7 +214,7 @@ public static void removeUnionOperators(Configuration conf, GenTezProcContext co roots.addAll(context.eventOperatorSet); // need to clone the plan. - List> newRoots = SerializationUtilities.cloneOperatorTree(conf, roots); + List> newRoots = SerializationUtilities.cloneOperatorTree(roots); // we're cloning the operator plan but we're retaining the original work. That means // that root operators have to be replaced with the cloned ops. 
The replacement map diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index f563b99..147ff7e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -371,7 +371,7 @@ protected void generateTaskTree(List> rootTasks, Pa // we need to clone some operator plans and remove union operators still for (BaseWork w: procCtx.workWithUnionOperators) { - GenTezUtils.removeUnionOperators(conf, procCtx, w); + GenTezUtils.removeUnionOperators(procCtx, w); } // then we make sure the file sink operators are set up right diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java index 924848f..aa33103 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/GenSparkUtils.java @@ -28,7 +28,6 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -188,8 +187,7 @@ private void collectOperators(Operator op, List> opList) { } // removes any union operator and clones the plan - public void removeUnionOperators(Configuration conf, GenSparkProcContext context, - BaseWork work) + public void removeUnionOperators(GenSparkProcContext context, BaseWork work) throws SemanticException { List> roots = new ArrayList>(); @@ -207,7 +205,7 @@ public void removeUnionOperators(Configuration conf, GenSparkProcContext context } // need to clone the plan. - List> newRoots = SerializationUtilities.cloneOperatorTree(conf, roots); + List> newRoots = SerializationUtilities.cloneOperatorTree(roots); // Build a map to map the original FileSinkOperator and the cloned FileSinkOperators // This map is used for set the stats flag for the cloned FileSinkOperators in later process diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java index 3673da4..08278de 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SparkCompiler.java @@ -238,7 +238,7 @@ protected void generateTaskTree(List> rootTasks, Pa // we need to clone some operator plans and remove union operators still for (BaseWork w : procCtx.workWithUnionOperators) { - GenSparkUtils.getUtils().removeUnionOperators(conf, procCtx, w); + GenSparkUtils.getUtils().removeUnionOperators(procCtx, w); } // we need to fill MapWork with 'local' work and bucket information for SMB Join. 
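
For orientation, a minimal sketch (not part of the patch; the class name is illustrative) of how callers such as GenTezUtils and GenSparkUtils invoke the cloning API after this change, now that the Configuration parameter is gone:

    import java.util.List;

    import org.apache.hadoop.hive.ql.exec.Operator;
    import org.apache.hadoop.hive.ql.exec.SerializationUtilities;

    public class CloneTreeSketch {
      // Deep-copies an operator tree. Serialization always goes through Kryo now,
      // so no Configuration is consulted for a serialization format.
      public static List<Operator<?>> deepCopy(List<Operator<?>> roots) {
        return SerializationUtilities.cloneOperatorTree(roots);
      }
    }
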
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java index 4bb661a..d4f58be 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/spark/SplitOpTreeForDPP.java @@ -107,8 +107,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, filterOp.setChildOperators(Utilities.makeList(selOp)); // Now clone the tree above selOp - List> newRoots = SerializationUtilities.cloneOperatorTree( - context.parseContext.getConf(), roots); + List> newRoots = SerializationUtilities.cloneOperatorTree(roots); for (int i = 0; i < roots.size(); i++) { TableScanOperator newTs = (TableScanOperator) newRoots.get(i); TableScanOperator oldTs = (TableScanOperator) roots.get(i); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java index 463da5d..bc67e5a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AbstractOperatorDesc.java @@ -32,10 +32,6 @@ protected transient Map opProps; protected long memNeeded = 0; - static { - PTFUtils.makeTransient(AbstractOperatorDesc.class, "opProps"); - } - @Override @Explain(skipHeader = true, displayName = "Statistics", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) public Statistics getStatistics() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java index 1a0cdf8..1ecbaad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java @@ -33,10 +33,6 @@ */ public class AggregationDesc implements java.io.Serializable { - static { - PTFUtils.makeTransient(AggregationDesc.class, "genericUDAFEvaluator"); - } - private static final long serialVersionUID = 1L; private String genericUDAFName; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java index 0226278..df153a2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java @@ -36,9 +36,6 @@ private String columnTypes; private String destinationCreateTable; - static { - PTFUtils.makeTransient(LoadFileDesc.class, "targetDir"); - } public LoadFileDesc() { } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java index 0627b9f..2b01712 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/LoadMultiFilesDesc.java @@ -37,9 +37,6 @@ private String columnTypes; private transient List srcDirs; - static { - PTFUtils.makeTransient(LoadMultiFilesDesc.class, "targetDirs"); - } public LoadMultiFilesDesc() { } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java index 3ec3b1f..c4b49b6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PTFDesc.java @@ -19,20 +19,19 @@ package org.apache.hadoop.hive.ql.plan; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import 
java.util.Collections; +import java.util.List; + import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.ql.exec.PTFUtils; import org.apache.hadoop.hive.ql.parse.LeadLagInfo; +import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.ql.plan.ptf.PTFInputDef; import org.apache.hadoop.hive.ql.plan.ptf.PartitionedTableFunctionDef; import org.apache.hadoop.hive.ql.plan.ptf.WindowTableFunctionDef; import org.apache.hadoop.hive.ql.udf.ptf.Noop; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import org.apache.hadoop.hive.ql.plan.Explain.Level; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @Explain(displayName = "PTF Operator", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED }) @@ -42,7 +41,7 @@ private static final Logger LOG = LoggerFactory.getLogger(PTFDesc.class.getName()); PartitionedTableFunctionDef funcDef; - LeadLagInfo llInfo; + transient LeadLagInfo llInfo; /* * is this PTFDesc for a Map-Side PTF Operation? */ @@ -50,11 +49,6 @@ transient Configuration cfg; - static{ - PTFUtils.makeTransient(PTFDesc.class, "llInfo"); - PTFUtils.makeTransient(PTFDesc.class, "cfg"); - } - public PartitionedTableFunctionDef getFuncDef() { return funcDef; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java index 43bf7c5..098aa89 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java @@ -41,10 +41,6 @@ public class TableScanDesc extends AbstractOperatorDesc { private static final long serialVersionUID = 1L; - static { - PTFUtils.makeTransient(TableScanDesc.class, "filterObject", "referencedColumns", "tableMetadata"); - } - private String alias; private List virtualCols; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java index 02664a8..fa7fc76 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/PTFExpressionDef.java @@ -31,10 +31,6 @@ transient ExprNodeEvaluator exprEvaluator; transient ObjectInspector OI; - static{ - PTFUtils.makeTransient(PTFExpressionDef.class, "exprEvaluator", "OI"); - } - public PTFExpressionDef() {} public PTFExpressionDef(PTFExpressionDef e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ShapeDetails.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ShapeDetails.java index 85d7f61..bc2ee83 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ShapeDetails.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ptf/ShapeDetails.java @@ -36,10 +36,6 @@ transient RowResolver rr; transient TypeCheckCtx typeCheckCtx; - static { - PTFUtils.makeTransient(ShapeDetails.class, "OI", "serde", "rr", "typeCheckCtx"); - } - public String getSerdeClassName() { return serdeClassName; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java index 4740b81..351b593 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLeadLag.java @@ -41,11 +41,6 @@ transient Converter defaultValueConverter; int amt; - static { - PTFUtils.makeTransient(GenericUDFLeadLag.class, "exprEvaluator", "pItr", "firstArgOI", - "defaultArgOI", 
"defaultValueConverter"); - } - @Override public Object evaluate(DeferredObject[] arguments) throws HiveException { Object defaultVal = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java index a0f5a7a..c76118b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/TableFunctionEvaluator.java @@ -94,10 +94,6 @@ transient protected PTFPartition outputPartition; transient protected boolean canAcceptInputAsStream; - static { - PTFUtils.makeTransient(TableFunctionEvaluator.class, "outputOI", "rawInputOI"); - } - public StructObjectInspector getOutputOI() { return OI; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java index c1667c2..64db486 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java @@ -83,7 +83,7 @@ public void testPlan() throws Exception { JobConf job = new JobConf(TestPlan.class); // serialize the configuration once .. ByteArrayOutputStream baos = new ByteArrayOutputStream(); - SerializationUtilities.serializePlan(mrwork, baos, job); + SerializationUtilities.serializePlan(mrwork, baos); baos.close(); String v1 = baos.toString(); @@ -101,7 +101,7 @@ public void testPlan() throws Exception { // serialize again baos.reset(); - SerializationUtilities.serializePlan(mrwork2, baos, job); + SerializationUtilities.serializePlan(mrwork2, baos); baos.close(); // verify that the two are equal diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 1ff7eb5..a4b948a 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -1615,7 +1615,7 @@ JobConf createMockExecutionEnvironment(Path workDir, Path mapXml = new Path(workDir, "map.xml"); localFs.delete(mapXml, true); FSDataOutputStream planStream = localFs.create(mapXml); - SerializationUtilities.serializePlan(mapWork, planStream, conf); + SerializationUtilities.serializePlan(mapWork, planStream); planStream.close(); return conf; } diff --git a/ql/src/test/queries/clientpositive/cast_qualified_types.q b/ql/src/test/queries/clientpositive/cast_qualified_types.q index fe0abd4..4f977c1 100644 --- a/ql/src/test/queries/clientpositive/cast_qualified_types.q +++ b/ql/src/test/queries/clientpositive/cast_qualified_types.q @@ -1,5 +1,3 @@ -set hive.plan.serialization.format=javaXML; - select cast(key as decimal(10,2)) as c1, cast(key as char(10)) as c2,