diff --git itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index d9fb645..4a5b3f7 100644
--- itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -900,6 +900,10 @@ public Function getFunction(String catName, String dbName, String funcName)
   }

   @Override
+  public void clear_aggregate_col_stats_cache() {
+  }
+
+  @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames) throws MetaException {
diff --git itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
index 9fe9d05..c1fcb4d 100644
--- itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
+++ itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
@@ -1132,6 +1132,8 @@ public String cliInit(File file) throws Exception {
       newSession(false);
     }

+    db.getMSC().clearAggregateColStatsCache();
+
     CliSessionState ss = (CliSessionState) SessionState.get();
     String outFileExtension = getOutFileExtension(fileName);
diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index e0431e5..1c11e86 100644
--- standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@ -264,6 +264,8 @@
     public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

+    public void clear_aggregate_col_stats_cache() throws MetaException, org.apache.thrift.TException;
+
     public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;

     public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
@@ -714,6 +716,8 @@
     public void get_partitions_statistics_req(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

+    public void clear_aggregate_col_stats_cache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
     public void get_aggr_stats_for(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;

     public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -4332,6 +4336,28 @@ public PartitionsStatsResult recv_get_partitions_statistics_req() throws NoSuchO
       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_partitions_statistics_req failed: unknown result");
     }

+    public void clear_aggregate_col_stats_cache() throws MetaException, org.apache.thrift.TException
+    {
+
send_clear_aggregate_col_stats_cache(); + recv_clear_aggregate_col_stats_cache(); + } + + public void send_clear_aggregate_col_stats_cache() throws org.apache.thrift.TException + { + clear_aggregate_col_stats_cache_args args = new clear_aggregate_col_stats_cache_args(); + sendBase("clear_aggregate_col_stats_cache", args); + } + + public void recv_clear_aggregate_col_stats_cache() throws MetaException, org.apache.thrift.TException + { + clear_aggregate_col_stats_cache_result result = new clear_aggregate_col_stats_cache_result(); + receiveBase(result, "clear_aggregate_col_stats_cache"); + if (result.o1 != null) { + throw result.o1; + } + return; + } + public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException { send_get_aggr_stats_for(request); @@ -11316,6 +11342,35 @@ public PartitionsStatsResult getResult() throws NoSuchObjectException, MetaExcep } } + public void clear_aggregate_col_stats_cache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + clear_aggregate_col_stats_cache_call method_call = new clear_aggregate_col_stats_cache_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class clear_aggregate_col_stats_cache_call extends org.apache.thrift.async.TAsyncMethodCall { + public clear_aggregate_col_stats_cache_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("clear_aggregate_col_stats_cache", org.apache.thrift.protocol.TMessageType.CALL, 0)); + clear_aggregate_col_stats_cache_args args = new clear_aggregate_col_stats_cache_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public void getResult() throws MetaException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + (new Client(prot)).recv_clear_aggregate_col_stats_cache(); + } + } + public void get_aggr_stats_for(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); get_aggr_stats_for_call method_call = new get_aggr_stats_for_call(request, resultHandler, this, ___protocolFactory, ___transport); @@ -15099,6 +15154,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public clear_aggregate_col_stats_cache() { + super("clear_aggregate_col_stats_cache"); + } + + public clear_aggregate_col_stats_cache_args getEmptyArgsInstance() { + return new clear_aggregate_col_stats_cache_args(); + } + + protected boolean 
isOneway() { + return false; + } + + public clear_aggregate_col_stats_cache_result getResult(I iface, clear_aggregate_col_stats_cache_args args) throws org.apache.thrift.TException { + clear_aggregate_col_stats_cache_result result = new clear_aggregate_col_stats_cache_result(); + try { + iface.clear_aggregate_col_stats_cache(); + } catch (MetaException o1) { + result.o1 = o1; + } + return result; + } + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_aggr_stats_for extends org.apache.thrift.ProcessFunction { public get_aggr_stats_for() { super("get_aggr_stats_for"); @@ -21036,6 +21116,7 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { - public get_aggr_stats_for() { - super("get_aggr_stats_for"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class clear_aggregate_col_stats_cache extends org.apache.thrift.AsyncProcessFunction { + public clear_aggregate_col_stats_cache() { + super("clear_aggregate_col_stats_cache"); } - public get_aggr_stats_for_args getEmptyArgsInstance() { - return new get_aggr_stats_for_args(); + public clear_aggregate_col_stats_cache_args getEmptyArgsInstance() { + return new clear_aggregate_col_stats_cache_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(AggrStats o) { - get_aggr_stats_for_result result = new get_aggr_stats_for_result(); - result.success = o; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + clear_aggregate_col_stats_cache_result result = new clear_aggregate_col_stats_cache_result(); try { fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -28211,17 +28291,12 @@ public void onComplete(AggrStats o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - get_aggr_stats_for_result result = new get_aggr_stats_for_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; + clear_aggregate_col_stats_cache_result result = new clear_aggregate_col_stats_cache_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; - result.setO2IsSet(true); - msg = result; - } else { msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; @@ -28242,100 +28317,26 @@ protected boolean isOneway() { return false; } - public void start(I iface, get_aggr_stats_for_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.get_aggr_stats_for(args.request,resultHandler); + public void start(I iface, clear_aggregate_col_stats_cache_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.clear_aggregate_col_stats_cache(resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class set_aggr_stats_for extends org.apache.thrift.AsyncProcessFunction { - public set_aggr_stats_for() { - super("set_aggr_stats_for"); - } 
- - public set_aggr_stats_for_args getEmptyArgsInstance() { - return new set_aggr_stats_for_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Boolean o) { - set_aggr_stats_for_result result = new set_aggr_stats_for_result(); - result.success = o; - result.setSuccessIsSet(true); - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - set_aggr_stats_for_result result = new set_aggr_stats_for_result(); - if (e instanceof NoSuchObjectException) { - result.o1 = (NoSuchObjectException) e; - result.setO1IsSet(true); - msg = result; - } - else if (e instanceof InvalidObjectException) { - result.o2 = (InvalidObjectException) e; - result.setO2IsSet(true); - msg = result; - } - else if (e instanceof MetaException) { - result.o3 = (MetaException) e; - result.setO3IsSet(true); - msg = result; - } - else if (e instanceof InvalidInputException) { - result.o4 = (InvalidInputException) e; - result.setO4IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, set_aggr_stats_for_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.set_aggr_stats_for(args.request,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class delete_partition_column_statistics extends org.apache.thrift.AsyncProcessFunction { - public delete_partition_column_statistics() { - super("delete_partition_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_aggr_stats_for extends org.apache.thrift.AsyncProcessFunction { + public get_aggr_stats_for() { + super("get_aggr_stats_for"); } - public delete_partition_column_statistics_args getEmptyArgsInstance() { - return new delete_partition_column_statistics_args(); + public get_aggr_stats_for_args getEmptyArgsInstance() { + return new get_aggr_stats_for_args(); } - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Boolean o) { - delete_partition_column_statistics_result result = new delete_partition_column_statistics_result(); + return new AsyncMethodCallback() { + public void onComplete(AggrStats o) { + get_aggr_stats_for_result result = new get_aggr_stats_for_result(); result.success = o; - result.setSuccessIsSet(true); try { 
fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); return; @@ -28347,7 +28348,7 @@ public void onComplete(Boolean o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - delete_partition_column_statistics_result result = new delete_partition_column_statistics_result(); + get_aggr_stats_for_result result = new get_aggr_stats_for_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); @@ -28358,16 +28359,6 @@ else if (e instanceof MetaException) { result.setO2IsSet(true); msg = result; } - else if (e instanceof InvalidObjectException) { - result.o3 = (InvalidObjectException) e; - result.setO3IsSet(true); - msg = result; - } - else if (e instanceof InvalidInputException) { - result.o4 = (InvalidInputException) e; - result.setO4IsSet(true); - msg = result; - } else { msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; @@ -28388,25 +28379,25 @@ protected boolean isOneway() { return false; } - public void start(I iface, delete_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name,resultHandler); + public void start(I iface, get_aggr_stats_for_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.get_aggr_stats_for(args.request,resultHandler); } } - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class delete_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { - public delete_table_column_statistics() { - super("delete_table_column_statistics"); + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class set_aggr_stats_for extends org.apache.thrift.AsyncProcessFunction { + public set_aggr_stats_for() { + super("set_aggr_stats_for"); } - public delete_table_column_statistics_args getEmptyArgsInstance() { - return new delete_table_column_statistics_args(); + public set_aggr_stats_for_args getEmptyArgsInstance() { + return new set_aggr_stats_for_args(); } public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { final org.apache.thrift.AsyncProcessFunction fcall = this; return new AsyncMethodCallback() { public void onComplete(Boolean o) { - delete_table_column_statistics_result result = new delete_table_column_statistics_result(); + set_aggr_stats_for_result result = new set_aggr_stats_for_result(); result.success = o; result.setSuccessIsSet(true); try { @@ -28420,83 +28411,12 @@ public void onComplete(Boolean o) { public void onError(Exception e) { byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; org.apache.thrift.TBase msg; - delete_table_column_statistics_result result = new delete_table_column_statistics_result(); + set_aggr_stats_for_result result = new set_aggr_stats_for_result(); if (e instanceof NoSuchObjectException) { result.o1 = (NoSuchObjectException) e; result.setO1IsSet(true); msg = result; } - else if (e instanceof MetaException) { - result.o2 = (MetaException) e; - result.setO2IsSet(true); - msg = result; - } - else if (e instanceof InvalidObjectException) { - result.o3 = (InvalidObjectException) e; - result.setO3IsSet(true); - msg = result; - } - else if (e instanceof 
InvalidInputException) { - result.o4 = (InvalidInputException) e; - result.setO4IsSet(true); - msg = result; - } - else - { - msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; - msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); - } - try { - fcall.sendResponse(fb,msg,msgType,seqid); - return; - } catch (Exception ex) { - LOGGER.error("Exception writing to internal frame buffer", ex); - } - fb.close(); - } - }; - } - - protected boolean isOneway() { - return false; - } - - public void start(I iface, delete_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { - iface.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); - } - } - - @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_function extends org.apache.thrift.AsyncProcessFunction { - public create_function() { - super("create_function"); - } - - public create_function_args getEmptyArgsInstance() { - return new create_function_args(); - } - - public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { - final org.apache.thrift.AsyncProcessFunction fcall = this; - return new AsyncMethodCallback() { - public void onComplete(Void o) { - create_function_result result = new create_function_result(); - try { - fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); - return; - } catch (Exception e) { - LOGGER.error("Exception writing to internal frame buffer", e); - } - fb.close(); - } - public void onError(Exception e) { - byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; - org.apache.thrift.TBase msg; - create_function_result result = new create_function_result(); - if (e instanceof AlreadyExistsException) { - result.o1 = (AlreadyExistsException) e; - result.setO1IsSet(true); - msg = result; - } else if (e instanceof InvalidObjectException) { result.o2 = (InvalidObjectException) e; result.setO2IsSet(true); @@ -28507,8 +28427,225 @@ else if (e instanceof MetaException) { result.setO3IsSet(true); msg = result; } - else if (e instanceof NoSuchObjectException) { - result.o4 = (NoSuchObjectException) e; + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, set_aggr_stats_for_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.set_aggr_stats_for(args.request,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class delete_partition_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public delete_partition_column_statistics() { + super("delete_partition_column_statistics"); + } + + public delete_partition_column_statistics_args 
getEmptyArgsInstance() { + return new delete_partition_column_statistics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Boolean o) { + delete_partition_column_statistics_result result = new delete_partition_column_statistics_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + delete_partition_column_statistics_result result = new delete_partition_column_statistics_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, delete_partition_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.delete_partition_column_statistics(args.db_name, args.tbl_name, args.part_name, args.col_name,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class delete_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { + public delete_table_column_statistics() { + super("delete_table_column_statistics"); + } + + public delete_table_column_statistics_args getEmptyArgsInstance() { + return new delete_table_column_statistics_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Boolean o) { + delete_table_column_statistics_result result = new delete_table_column_statistics_result(); + result.success = o; + result.setSuccessIsSet(true); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + delete_table_column_statistics_result result = new delete_table_column_statistics_result(); + if (e instanceof NoSuchObjectException) { + result.o1 = (NoSuchObjectException) e; + 
result.setO1IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o2 = (MetaException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o3 = (InvalidObjectException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof InvalidInputException) { + result.o4 = (InvalidInputException) e; + result.setO4IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, delete_table_column_statistics_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.delete_table_column_statistics(args.db_name, args.tbl_name, args.col_name,resultHandler); + } + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class create_function extends org.apache.thrift.AsyncProcessFunction { + public create_function() { + super("create_function"); + } + + public create_function_args getEmptyArgsInstance() { + return new create_function_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + create_function_result result = new create_function_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + create_function_result result = new create_function_result(); + if (e instanceof AlreadyExistsException) { + result.o1 = (AlreadyExistsException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof InvalidObjectException) { + result.o2 = (InvalidObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else if (e instanceof MetaException) { + result.o3 = (MetaException) e; + result.setO3IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o4 = (NoSuchObjectException) e; result.setO4IsSet(true); msg = result; } @@ -156449,6 +156586,612 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_stati } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class clear_aggregate_col_stats_cache_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_aggregate_col_stats_cache_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new clear_aggregate_col_stats_cache_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new 
clear_aggregate_col_stats_cache_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_aggregate_col_stats_cache_args.class, metaDataMap); + } + + public clear_aggregate_col_stats_cache_args() { + } + + /** + * Performs a deep copy on other. 
+ */ + public clear_aggregate_col_stats_cache_args(clear_aggregate_col_stats_cache_args other) { + } + + public clear_aggregate_col_stats_cache_args deepCopy() { + return new clear_aggregate_col_stats_cache_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof clear_aggregate_col_stats_cache_args) + return this.equals((clear_aggregate_col_stats_cache_args)that); + return false; + } + + public boolean equals(clear_aggregate_col_stats_cache_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(clear_aggregate_col_stats_cache_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("clear_aggregate_col_stats_cache_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class clear_aggregate_col_stats_cache_argsStandardSchemeFactory implements SchemeFactory { + public clear_aggregate_col_stats_cache_argsStandardScheme getScheme() { + return new clear_aggregate_col_stats_cache_argsStandardScheme(); + } + } + + private static class clear_aggregate_col_stats_cache_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_aggregate_col_stats_cache_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == 
org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_aggregate_col_stats_cache_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class clear_aggregate_col_stats_cache_argsTupleSchemeFactory implements SchemeFactory { + public clear_aggregate_col_stats_cache_argsTupleScheme getScheme() { + return new clear_aggregate_col_stats_cache_argsTupleScheme(); + } + } + + private static class clear_aggregate_col_stats_cache_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, clear_aggregate_col_stats_cache_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, clear_aggregate_col_stats_cache_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class clear_aggregate_col_stats_cache_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clear_aggregate_col_stats_cache_result"); + + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new clear_aggregate_col_stats_cache_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clear_aggregate_col_stats_cache_resultTupleSchemeFactory()); + } + + private MetaException o1; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + O1((short)1, "o1"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // O1 + return O1; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clear_aggregate_col_stats_cache_result.class, metaDataMap); + } + + public clear_aggregate_col_stats_cache_result() { + } + + public clear_aggregate_col_stats_cache_result( + MetaException o1) + { + this(); + this.o1 = o1; + } + + /** + * Performs a deep copy on other. + */ + public clear_aggregate_col_stats_cache_result(clear_aggregate_col_stats_cache_result other) { + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + } + + public clear_aggregate_col_stats_cache_result deepCopy() { + return new clear_aggregate_col_stats_cache_result(this); + } + + @Override + public void clear() { + this.o1 = null; + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case O1: + return getO1(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case O1: + return isSetO1(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof clear_aggregate_col_stats_cache_result) + return this.equals((clear_aggregate_col_stats_cache_result)that); + return false; + } + + public boolean equals(clear_aggregate_col_stats_cache_result that) { + if (that == null) + return false; + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + return list.hashCode(); + } + + @Override + public 
int compareTo(clear_aggregate_col_stats_cache_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("clear_aggregate_col_stats_cache_result("); + boolean first = true; + + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class clear_aggregate_col_stats_cache_resultStandardSchemeFactory implements SchemeFactory { + public clear_aggregate_col_stats_cache_resultStandardScheme getScheme() { + return new clear_aggregate_col_stats_cache_resultStandardScheme(); + } + } + + private static class clear_aggregate_col_stats_cache_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, clear_aggregate_col_stats_cache_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, clear_aggregate_col_stats_cache_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + 
oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class clear_aggregate_col_stats_cache_resultTupleSchemeFactory implements SchemeFactory { + public clear_aggregate_col_stats_cache_resultTupleScheme getScheme() { + return new clear_aggregate_col_stats_cache_resultTupleScheme(); + } + } + + private static class clear_aggregate_col_stats_cache_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, clear_aggregate_col_stats_cache_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, clear_aggregate_col_stats_cache_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + } + } + + } + @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class get_aggr_stats_for_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_aggr_stats_for_args"); diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 333a2d9..845f4d0 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -943,6 +943,10 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function get_partitions_statistics_req(\metastore\PartitionsStatsRequest $request); /** + * @throws \metastore\MetaException + */ + public function clear_aggregate_col_stats_cache(); + /** * @param \metastore\PartitionsStatsRequest $request * @return \metastore\AggrStats * @throws \metastore\NoSuchObjectException @@ -8144,6 +8148,56 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_partitions_statistics_req failed: unknown result"); } + public function clear_aggregate_col_stats_cache() + { + $this->send_clear_aggregate_col_stats_cache(); + $this->recv_clear_aggregate_col_stats_cache(); + } + + public function send_clear_aggregate_col_stats_cache() + { + $args = new \metastore\ThriftHiveMetastore_clear_aggregate_col_stats_cache_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'clear_aggregate_col_stats_cache', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('clear_aggregate_col_stats_cache', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_clear_aggregate_col_stats_cache() + { + $bin_accel = 
($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_clear_aggregate_col_stats_cache_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_clear_aggregate_col_stats_cache_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->o1 !== null) { + throw $result->o1; + } + return; + } + public function get_aggr_stats_for(\metastore\PartitionsStatsRequest $request) { $this->send_get_aggr_stats_for($request); @@ -42013,6 +42067,133 @@ class ThriftHiveMetastore_get_partitions_statistics_req_result { } +class ThriftHiveMetastore_clear_aggregate_col_stats_cache_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHiveMetastore_clear_aggregate_col_stats_cache_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_clear_aggregate_col_stats_cache_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_clear_aggregate_col_stats_cache_result { + static $_TSPEC; + + /** + * @var \metastore\MetaException + */ + public $o1 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_clear_aggregate_col_stats_cache_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_clear_aggregate_col_stats_cache_result'); + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + 
return $xfer; + } + +} + class ThriftHiveMetastore_get_aggr_stats_for_args { static $_TSPEC; diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 9aeae9f..ec48bf3 100755 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -135,6 +135,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') print(' TableStatsResult get_table_statistics_req(TableStatsRequest request)') print(' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)') + print(' void clear_aggregate_col_stats_cache()') print(' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)') print(' bool set_aggr_stats_for(SetPartitionsStatsRequest request)') print(' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') @@ -982,6 +983,12 @@ elif cmd == 'get_partitions_statistics_req': sys.exit(1) pp.pprint(client.get_partitions_statistics_req(eval(args[0]),)) +elif cmd == 'clear_aggregate_col_stats_cache': + if len(args) != 0: + print('clear_aggregate_col_stats_cache requires 0 args') + sys.exit(1) + pp.pprint(client.clear_aggregate_col_stats_cache()) + elif cmd == 'get_aggr_stats_for': if len(args) != 1: print('get_aggr_stats_for requires 1 args') diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index eadf300..61b660d 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -935,6 +935,9 @@ def get_partitions_statistics_req(self, request): """ pass + def clear_aggregate_col_stats_cache(self): + pass + def get_aggr_stats_for(self, request): """ Parameters: @@ -5894,6 +5897,32 @@ def recv_get_partitions_statistics_req(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_partitions_statistics_req failed: unknown result") + def clear_aggregate_col_stats_cache(self): + self.send_clear_aggregate_col_stats_cache() + self.recv_clear_aggregate_col_stats_cache() + + def send_clear_aggregate_col_stats_cache(self): + self._oprot.writeMessageBegin('clear_aggregate_col_stats_cache', TMessageType.CALL, self._seqid) + args = clear_aggregate_col_stats_cache_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_clear_aggregate_col_stats_cache(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = clear_aggregate_col_stats_cache_result() + result.read(iprot) + iprot.readMessageEnd() + if result.o1 is not None: + raise result.o1 + return + def get_aggr_stats_for(self, request): """ Parameters: @@ -9764,6 +9793,7 @@ def __init__(self, handler): self._processMap["get_partition_column_statistics"] = Processor.process_get_partition_column_statistics 
self._processMap["get_table_statistics_req"] = Processor.process_get_table_statistics_req self._processMap["get_partitions_statistics_req"] = Processor.process_get_partitions_statistics_req + self._processMap["clear_aggregate_col_stats_cache"] = Processor.process_clear_aggregate_col_stats_cache self._processMap["get_aggr_stats_for"] = Processor.process_get_aggr_stats_for self._processMap["set_aggr_stats_for"] = Processor.process_set_aggr_stats_for self._processMap["delete_partition_column_statistics"] = Processor.process_delete_partition_column_statistics @@ -12769,6 +12799,28 @@ def process_get_partitions_statistics_req(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_clear_aggregate_col_stats_cache(self, seqid, iprot, oprot): + args = clear_aggregate_col_stats_cache_args() + args.read(iprot) + iprot.readMessageEnd() + result = clear_aggregate_col_stats_cache_result() + try: + self._handler.clear_aggregate_col_stats_cache() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("clear_aggregate_col_stats_cache", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_get_aggr_stats_for(self, seqid, iprot, oprot): args = get_aggr_stats_for_args() args.read(iprot) @@ -35349,6 +35401,118 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class clear_aggregate_col_stats_cache_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('clear_aggregate_col_stats_cache_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class clear_aggregate_col_stats_cache_result: + """ + Attributes: + - o1 + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + ) + + def __init__(self, o1=None,): + self.o1 = o1 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, 
iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('clear_aggregate_col_stats_cache_result') + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.o1) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class get_aggr_stats_for_args: """ Attributes: diff --git standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 39c671a..47b5c1a 100644 --- standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1903,6 +1903,21 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_statistics_req failed: unknown result') end + def clear_aggregate_col_stats_cache() + send_clear_aggregate_col_stats_cache() + recv_clear_aggregate_col_stats_cache() + end + + def send_clear_aggregate_col_stats_cache() + send_message('clear_aggregate_col_stats_cache', Clear_aggregate_col_stats_cache_args) + end + + def recv_clear_aggregate_col_stats_cache() + result = receive_message(Clear_aggregate_col_stats_cache_result) + raise result.o1 unless result.o1.nil? 
+      return
+    end
+
     def get_aggr_stats_for(request)
       send_get_aggr_stats_for(request)
       return recv_get_aggr_stats_for()
@@ -5227,6 +5242,17 @@ module ThriftHiveMetastore
       write_result(result, oprot, 'get_partitions_statistics_req', seqid)
     end
 
+    def process_clear_aggregate_col_stats_cache(seqid, iprot, oprot)
+      args = read_args(iprot, Clear_aggregate_col_stats_cache_args)
+      result = Clear_aggregate_col_stats_cache_result.new()
+      begin
+        @handler.clear_aggregate_col_stats_cache()
+      rescue ::MetaException => o1
+        result.o1 = o1
+      end
+      write_result(result, oprot, 'clear_aggregate_col_stats_cache', seqid)
+    end
+
     def process_get_aggr_stats_for(seqid, iprot, oprot)
       args = read_args(iprot, Get_aggr_stats_for_args)
       result = Get_aggr_stats_for_result.new()
@@ -10787,6 +10813,37 @@ module ThriftHiveMetastore
     ::Thrift::Struct.generate_accessors self
   end
 
+  class Clear_aggregate_col_stats_cache_args
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+
+    FIELDS = {
+
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
+  class Clear_aggregate_col_stats_cache_result
+    include ::Thrift::Struct, ::Thrift::Struct_Union
+    O1 = 1
+
+    FIELDS = {
+      O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}
+    }
+
+    def struct_fields; FIELDS; end
+
+    def validate
+    end
+
+    ::Thrift::Struct.generate_accessors self
+  end
+
   class Get_aggr_stats_for_args
     include ::Thrift::Struct, ::Thrift::Struct_Union
     REQUEST = 1
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index fcd0d44..dc7fa4a 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@ -3358,6 +3358,11 @@ protected void drop_table_with_environment_context(String catName, String dbname
   }
 
   @Override
+  public void clearAggregateColStatsCache() throws TException {
+    client.clear_aggregate_col_stats_cache();
+  }
+
+  @Override
   public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
       List<String> partNames) throws NoSuchObjectException, MetaException, TException {
     return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames, partNames);
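Illustrative usage, not part of the patch: a minimal sketch of driving the new client call, assuming the standalone-metastore HiveMetaStoreClient constructor that takes a Hadoop Configuration, a metastore reachable through the configured thrift URI, and placeholder names ("default", "sample_tbl", "col1", "ds=2024-01-01").

    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.AggrStats;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class ClearAggrStatsCacheSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();
        IMetaStoreClient msc = new HiveMetaStoreClient(conf);
        try {
          // Drop whatever aggregate column stats the metastore has cached so the
          // next lookup is recomputed from the backing database.
          msc.clearAggregateColStatsCache();
          AggrStats stats = msc.getAggrColStatsFor("default", "sample_tbl",
              Arrays.asList("col1"), Arrays.asList("ds=2024-01-01"));
          System.out.println("parts found: " + stats.getPartsFound());
        } finally {
          msc.close();
        }
      }
    }

The interface change below declares the same method on IMetaStoreClient, so callers can stay on the interface rather than the concrete client.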
diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index f67761e..1e7fb89 100644
--- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@ -3403,6 +3403,12 @@ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
       GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException;
 
   /**
+   * Clears the cache (if enabled) storing aggregate column stats.
+   * @throws TException thrift transport exception
+   */
+  void clearAggregateColStatsCache() throws TException;
+
+  /**
    * Get aggregated column stats for a set of partitions.
    * @param dbName database name
    * @param tblName table name
diff --git standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 19bc713..3730314 100644
--- standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@ -2222,6 +2222,7 @@ service ThriftHiveMetastore extends fb303.FacebookService
       (1:NoSuchObjectException o1, 2:MetaException o2)
   PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
       (1:NoSuchObjectException o1, 2:MetaException o2)
+  void clear_aggregate_col_stats_cache() throws (1:MetaException o1)
   AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
       (1:NoSuchObjectException o1, 2:MetaException o2)
   bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
index 8e920bb..472bb55 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AggregateStatsCache.java
@@ -568,4 +568,8 @@ void updateLastAccessTime() {
       this.misses = misses;
     }
   }
+
+  public void clear() {
+    cacheStore.clear();
+  }
 }
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 41f399b..ddd6f4d 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@ -7877,6 +7877,11 @@ public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
     }
 
     @Override
+    public void clear_aggregate_col_stats_cache() throws MetaException {
+      getMS().clear_aggregate_col_stats_cache();
+    }
+
+    @Override
     public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws TException {
       String catName = request.isSetCatName() ?
           request.getCatName().toLowerCase() : getDefaultCatalog(conf);
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
index b0cbe01..91b9a72 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java
@@ -1270,6 +1270,12 @@ public ColumnStatistics getTableStats(final String catName, final String dbName,
     return result;
   }
 
+  public void clearCache() {
+    if (aggrStatsCache != null) {
+      aggrStatsCache.clear();
+    }
+  }
+
   public AggrStats aggrColStatsForPartitions(String catName, String dbName, String tableName,
       List<String> partNames, List<String> colNames, boolean useDensityFunctionForNDVEstimation,
       double ndvTuner, boolean enableBitVector) throws MetaException {
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index f9a4e48..5229e7d 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -8889,6 +8889,11 @@ protected ColumnStatistics getJdoResult(
   }
 
   @Override
+  public void clear_aggregate_col_stats_cache() {
+    directSql.clearCache();
+  }
+
+  @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
       final List<String> partNames, final List<String> colNames,
       String writeIdList) throws MetaException, NoSuchObjectException {
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 03a116a..4961e62 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -1211,6 +1211,11 @@ void dropFunction(String catName, String dbName, String funcName)
   List<String> getFunctions(String catName, String dbName, String pattern) throws MetaException;
 
   /**
+   * Clear the aggregated stats cache if it is enabled.
+   */
+  void clear_aggregate_col_stats_cache();
+
+  /**
    * Get aggregated stats for a table or partition(s).
    * @param catName catalog name.
    * @param dbName database name.
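Server side, the handler method above walks RawStore.clear_aggregate_col_stats_cache(): ObjectStore forwards to MetaStoreDirectSql.clearCache(), which empties the AggregateStatsCache when direct SQL keeps one. A hypothetical helper (not in the patch) makes the chain explicit; RawStore implementations without such a cache may treat the call as a no-op, as the test stores below do.

    import org.apache.hadoop.hive.metastore.RawStore;

    /** Hypothetical utility: force aggregate column stats to be recomputed on next use. */
    public final class AggrStatsInvalidator {
      private AggrStatsInvalidator() {
      }

      // Delegates through the RawStore contract added in this patch; with ObjectStore this
      // ends up in MetaStoreDirectSql.clearCache() and AggregateStatsCache.clear().
      public static void invalidate(RawStore store) {
        store.clear_aggregate_col_stats_cache();
      }
    }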
diff --git standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index bded743..fa95ac1 100644
--- standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -2211,6 +2211,11 @@ public boolean deletePartitionColumnStatistics(String catName, String dbName, St
   }
 
   @Override
+  public void clear_aggregate_col_stats_cache() {
+    rawStore.clear_aggregate_col_stats_cache();
+  }
+
+  @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
     return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, null);
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index c13e538..036d961 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -861,6 +861,10 @@ public Function getFunction(String catName, String dbName, String funcName)
   }
 
   @Override
+  public void clear_aggregate_col_stats_cache() {
+  }
+
+  @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames)
       throws MetaException {
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index e943f17..f9af8f4 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -853,6 +853,10 @@ public Function getFunction(String catName, String dbName, String funcName)
   }
 
   @Override
+  public void clear_aggregate_col_stats_cache() {
+  }
+
+  @Override
   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
       List<String> partNames, List<String> colNames)
       throws MetaException {
diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 481abbc..85d1210 100644
--- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@ -2667,6 +2667,11 @@ protected void drop_table_with_environment_context(String dbname, String name,
   }
 
   @Override
+  public void clearAggregateColStatsCache() throws TException {
+    client.clear_aggregate_col_stats_cache();
+  }
+
+  @Override
   public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
       List<String> partNames) throws NoSuchObjectException, MetaException, TException {
     if (colNames.isEmpty() || partNames.isEmpty()) {
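The Javadoc added above clears the cache only "if it is enabled": the aggregate column-stats cache is an optional metastore feature, so on servers running without it the new RPC degenerates to a no-op (MetaStoreDirectSql.clearCache() guards on a null aggrStatsCache). A hedged sketch of toggling the feature through MetastoreConf, assuming the knob is exposed as ConfVars.AGGREGATE_STATS_CACHE_ENABLED (verify the enum name against your MetastoreConf version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

    public class AggrStatsCacheToggleSketch {
      public static void main(String[] args) {
        Configuration conf = MetastoreConf.newMetastoreConf();
        // Assumed ConfVars name; controls whether the metastore builds an AggregateStatsCache at all.
        MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.AGGREGATE_STATS_CACHE_ENABLED, true);
        System.out.println("aggregate stats cache enabled: "
            + MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.AGGREGATE_STATS_CACHE_ENABLED));
      }
    }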