diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java index 314ed2b..0b442a5 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/StreamUtils.java @@ -120,7 +120,7 @@ public class StreamUtils { /** * Reads a varInt value stored in an array. - * + * * @param input * Input array where the varInt is available * @param offset @@ -198,4 +198,14 @@ public class StreamUtils { out.write((byte) (0xff & (v >> 8))); out.write((byte) (0xff & v)); } + + public static long readLong(InputStream in) throws IOException { + long result = 0; + for (int shift = 56; shift >= 0; shift -= 8) { + long x = in.read(); + if (x < 0) throw new IOException("EOF"); + result |= (x << shift); + } + return result; + } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java new file mode 100644 index 0000000..a0006ed --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; + +/** + * Helper to convert Exceptions and StackTraces from/to protobuf. 
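As a quick usage sketch of the StreamUtils.readLong() helper added above (illustrative only, not part of the patch): readLong() consumes eight bytes most-significant-byte first, so it pairs with java.io.DataOutputStream.writeLong(), which writes the same big-endian layout.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hbase.io.util.StreamUtils;

    public class ReadLongExample {
      public static void main(String[] args) throws IOException {
        // Write a long with the JDK's big-endian encoder, then read it back with readLong().
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        new DataOutputStream(bos).writeLong(1234567890123L);
        long value = StreamUtils.readLong(new ByteArrayInputStream(bos.toByteArray()));
        System.out.println(value == 1234567890123L);   // prints true
      }
    }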
+ * (see ErrorHandling.proto for the internal of the proto messages) + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class ForeignExceptionUtil { + private ForeignExceptionUtil() { } + + public static IOException toIOException(final ForeignExceptionMessage eem) { + GenericExceptionMessage gem = eem.getGenericException(); + StackTraceElement[] trace = toStackTrace(gem.getTraceList()); + RemoteException re = new RemoteException(gem.getClassName(), gem.getMessage()); + re.setStackTrace(trace); + return re.unwrapRemoteException(); + } + + public static ForeignExceptionMessage toProtoForeignException(String source, Throwable t) { + GenericExceptionMessage.Builder gemBuilder = GenericExceptionMessage.newBuilder(); + gemBuilder.setClassName(t.getClass().getName()); + if (t.getMessage() != null) { + gemBuilder.setMessage(t.getMessage()); + } + // set the stack trace, if there is one + List stack = toProtoStackTraceElement(t.getStackTrace()); + if (stack != null) { + gemBuilder.addAllTrace(stack); + } + GenericExceptionMessage payload = gemBuilder.build(); + ForeignExceptionMessage.Builder exception = ForeignExceptionMessage.newBuilder(); + exception.setGenericException(payload).setSource(source); + return exception.build(); + } + + /** + * Convert a stack trace to list of {@link StackTraceElement}. + * @param trace the stack trace to convert to protobuf message + * @return null if the passed stack is null. + */ + public static List toProtoStackTraceElement(StackTraceElement[] trace) { + // if there is no stack trace, ignore it and just return the message + if (trace == null) return null; + // build the stack trace for the message + List pbTrace = new ArrayList(trace.length); + for (StackTraceElement elem : trace) { + StackTraceElementMessage.Builder stackBuilder = StackTraceElementMessage.newBuilder(); + stackBuilder.setDeclaringClass(elem.getClassName()); + if (elem.getFileName() != null) { + stackBuilder.setFileName(elem.getFileName()); + } + stackBuilder.setLineNumber(elem.getLineNumber()); + stackBuilder.setMethodName(elem.getMethodName()); + pbTrace.add(stackBuilder.build()); + } + return pbTrace; + } + + /** + * Unwind a serialized array of {@link StackTraceElementMessage}s to a + * {@link StackTraceElement}s. + * @param traceList list that was serialized + * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on + * the sender). + */ + public static StackTraceElement[] toStackTrace(List traceList) { + if (traceList == null || traceList.size() == 0) { + return new StackTraceElement[0]; // empty array + } + StackTraceElement[] trace = new StackTraceElement[traceList.size()]; + for (int i = 0; i < traceList.size(); i++) { + StackTraceElementMessage elem = traceList.get(i); + trace[i] = new StackTraceElement( + elem.getDeclaringClass(), elem.getMethodName(), + elem.hasFileName() ? elem.getFileName() : null, + elem.getLineNumber()); + } + return trace; + } +} \ No newline at end of file diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml new file mode 100644 index 0000000..9683db2 --- /dev/null +++ b/hbase-procedure/pom.xml @@ -0,0 +1,181 @@ + + + + 4.0.0 + + hbase + org.apache.hbase + 2.0.0-SNAPSHOT + .. 
+ + + hbase-procedure + HBase - Procedure + Procedure Framework + + + + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + ${maven.assembly.version} + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + test + + test + + + true + + + + + + + + + + org.apache.hbase + hbase-common + ${project.version} + tests + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.hbase + hbase-protocol + + + org.apache.hbase + hbase-common + + + com.google.guava + guava + + + commons-logging + commons-logging + + + + + + + hadoop-1.1 + + + + hadoop.profile1.1 + + + + + org.apache.hadoop + hadoop-core + + + + + hadoop-1.0 + + + hadoop.profile + 1.0 + + + + + org.apache.hadoop + hadoop-core + + + + + + hadoop-2.0 + + + + !hadoop.profile + + + + + org.apache.hadoop + hadoop-common + + + + + + hadoop-3.0 + + + hadoop.profile + 3.0 + + + + 3.0-SNAPSHOT + + + + org.apache.hadoop + hadoop-common + + + + + diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java new file mode 100644 index 0000000..1c3be2d --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class OnePhaseProcedure extends Procedure { + // TODO (e.g. used by online snapshots) +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java new file mode 100644 index 0000000..338fcad --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -0,0 +1,680 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.lang.reflect.Constructor; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeoutException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.protobuf.ByteString; + +/** + * Base Procedure class responsible to handle the Procedure Metadata + * e.g. state, startTime, lastUpdate, stack-indexes, ... + * + * execute() is called each time the procedure is executed. + * it may be called multiple times in case of failure and restart, so the + * code must be idempotent. + * the return is a set of sub-procedures or null in case the procedure doesn't + * have sub-procedures. Once the sub-procedures are successfully completed + * the execute() method is called again, you should think at it as a stack: + * -> step 1 + * ---> step 2 + * -> step 1 + * + * rollback() is called when the procedure or one of the sub-procedures is failed. + * the rollback step is supposed to cleanup the resources created during the + * execute() step. in case of failure and restart rollback() may be called + * multiple times, so the code must be idempotent. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public abstract class Procedure implements Comparable { + // unchanged after initialization + private String owner = null; + private Long parentProcId = null; + private Long procId = null; + private long startTime; + + // runtime state, updated every operation + private ProcedureState state = ProcedureState.INITIALIZING; + private Integer timeout = null; + private int[] stackIndexes = null; + private int childrenLatch = 0; + private long lastUpdate; + + private RemoteProcedureException exception = null; + private byte[] result = null; + + /** + * The main code of the procedure. It must be idempotent since execute() + * may be called multiple time in case of machine failure in the middle + * of the execution. + * @return a set of sub-procedures or null if there is nothing else to execute. + */ + protected abstract Procedure[] execute(TEnvironment env) + throws ProcedureYieldException; + + /** + * The code to undo what done by the execute() code. + * It is called when the procedure or one of the sub-procedure failed or an + * abort was requested. It should cleanup all the resources created by + * the execute() call. 
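To make the execute()/rollback() contract described above concrete, here is a minimal subclass sketch. The environment type MasterEnv and its marker methods are hypothetical placeholders and this class is not part of the patch; it only illustrates the idempotent execute/rollback pair and the AtomicBoolean-based abort the javadoc itself suggests.

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.util.concurrent.atomic.AtomicBoolean;

    // Hypothetical example: creates a marker, removes it again on rollback.
    public class CreateMarkerProcedure extends Procedure<MasterEnv> {
      private final AtomicBoolean aborted = new AtomicBoolean(false);

      @Override
      protected Procedure[] execute(MasterEnv env) throws ProcedureYieldException {
        if (aborted.get()) {
          setAbortFailure("CreateMarkerProcedure", "abort requested");
          return null;
        }
        if (!env.markerExists()) {     // idempotent: a re-run after a crash is a no-op
          env.createMarker();
        }
        return null;                   // no sub-procedures to schedule
      }

      @Override
      protected void rollback(MasterEnv env) throws IOException {
        env.deleteMarkerIfPresent();   // idempotent cleanup of what execute() created
      }

      @Override
      protected boolean abort(MasterEnv env) {
        return aborted.compareAndSet(false, true);
      }

      @Override
      protected void serializeStateData(OutputStream stream) throws IOException {
        // nothing to persist in this example
      }

      @Override
      protected void deserializeStateData(InputStream stream) throws IOException {
        // nothing to restore in this example
      }
    }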
The implementation must be idempotent since rollback() + * may be called multiple time in case of machine failure in the middle + * of the execution. + * @throws IOException temporary failure, the rollback will retry later + */ + protected abstract void rollback(TEnvironment env) + throws IOException; + + /** + * The abort() call is asynchronous and each procedure must decide how to deal + * with that, if they want to be abortable. The simplest implementation + * is to have an AtomicBoolean set in the abort() method and then the execute() + * will check if the abort flag is set or not. + * abort() may be called multiple times from the client, so the implementation + * must be idempotent. + * + * NOTE: abort() is not like Thread.interrupt() it is just a notification + * that allows the procedure implementor where to abort to avoid leak and + * have a better control on what was executed and what not. + */ + protected abstract boolean abort(TEnvironment env); + + /** + * The user-level code of the procedure may have some state to + * persist (e.g. input arguments) to be able to resume on failure. + * @param stream the stream that will contain the user serialized data + */ + protected abstract void serializeStateData(final OutputStream stream) + throws IOException; + + /** + * Called on store load to allow the user to decode the previously serialized + * state. + * @param stream the stream that contains the user serialized data + */ + protected abstract void deserializeStateData(final InputStream stream) + throws IOException; + + /** + * The user should override this method, and try to take a lock if necessary. + * A lock can be anything, and it is up to the implementor. + * Example: in our Master we can execute request in parallel for different tables + * create t1 and create t2 can be executed at the same time. + * anything else on t1/t2 is queued waiting that specific table create to happen. + * + * @return true if the lock was acquired and false otherwise + */ + protected boolean acquireLock(final TEnvironment env) { + return true; + } + + /** + * The user should override this method, and release lock if necessary. + */ + protected void releaseLock(final TEnvironment env) { + // no-op + } + + /** + * Called when the procedure is loaded for replay. + * The procedure implementor may use this method to perform some quick + * operation before replay. + * e.g. failing the procedure if the state on replay may be unknown. + */ + protected void beforeReplay(final TEnvironment env) { + // no-op + } + + /** + * Called when the procedure is marked as completed (success or rollback). + * The procedure implementor may use this method to cleanup in-memory states. + * This operation will not be retried on failure. + */ + protected void completionCleanup(final TEnvironment env) { + // no-op + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + toStringClassDetails(sb); + + if (procId != null) { + sb.append(" id="); + sb.append(getProcId()); + } + + if (hasParent()) { + sb.append(" parent="); + sb.append(getParentProcId()); + } + + if (hasOwner()) { + sb.append(" owner="); + sb.append(getOwner()); + } + + sb.append(" state="); + sb.append(getState()); + return sb.toString(); + } + + /** + * Extend the toString() information with the procedure details + * e.g. 
className and parameters + * @param builder the string builder to use to append the proc specific information + */ + protected void toStringClassDetails(StringBuilder builder) { + builder.append(getClass().getName()); + } + + /** + * @return the serialized result if any, otherwise null + */ + public byte[] getResult() { + return result; + } + + /** + * The procedure may leave a "result" on completion. + * @param result the serialized result that will be passed to the client + */ + protected void setResult(final byte[] result) { + this.result = result; + } + + public long getProcId() { + return procId; + } + + public boolean hasParent() { + return parentProcId != null; + } + + public boolean hasException() { + return exception != null; + } + + public boolean hasTimeout() { + return timeout != null; + } + + public long getParentProcId() { + return parentProcId; + } + + /** + * @return true if the procedure has failed. + * true may mean failed but not yet rolledback or failed and rolledback. + */ + public synchronized boolean isFailed() { + return exception != null || state == ProcedureState.ROLLEDBACK; + } + + /** + * @return true if the procedure is finished successfully. + */ + public synchronized boolean isSuccess() { + return state == ProcedureState.FINISHED && exception == null; + } + + /** + * @return true if the procedure is finished. The Procedure may be completed + * successfuly or failed and rolledback. + */ + public synchronized boolean isFinished() { + switch (state) { + case ROLLEDBACK: + return true; + case FINISHED: + return exception == null; + default: + break; + } + return false; + } + + /** + * @return true if the procedure is waiting for a child to finish or for an external event. + */ + public synchronized boolean isWaiting() { + switch (state) { + case WAITING: + case WAITING_TIMEOUT: + return true; + default: + break; + } + return false; + } + + public synchronized RemoteProcedureException getException() { + return exception; + } + + public long getStartTime() { + return startTime; + } + + public synchronized long getLastUpdate() { + return lastUpdate; + } + + public synchronized long elapsedTime() { + return lastUpdate - startTime; + } + + /** + * @param timeout timeout in msec + */ + protected void setTimeout(final int timeout) { + this.timeout = timeout; + } + + /** + * @return the timeout in msec + */ + public int getTimeout() { + return timeout; + } + + /** + * @return the remaining time before the timeout + */ + public long getTimeRemaining() { + return Math.max(0, timeout - (EnvironmentEdgeManager.currentTime() - startTime)); + } + + protected void setOwner(final String owner) { + this.owner = StringUtils.isEmpty(owner) ? 
null : owner; + } + + public String getOwner() { + return owner; + } + + public boolean hasOwner() { + return owner != null; + } + + @VisibleForTesting + @InterfaceAudience.Private + protected synchronized void setState(final ProcedureState state) { + this.state = state; + updateTimestamp(); + } + + @InterfaceAudience.Private + protected synchronized ProcedureState getState() { + return state; + } + + protected void setFailure(final String source, final Throwable cause) { + setFailure(new RemoteProcedureException(source, cause)); + } + + protected synchronized void setFailure(final RemoteProcedureException exception) { + this.exception = exception; + if (!isFinished()) { + setState(ProcedureState.FINISHED); + } + } + + protected void setAbortFailure(final String source, final String msg) { + setFailure(source, new ProcedureAbortedException(msg)); + } + + @InterfaceAudience.Private + protected synchronized boolean setTimeoutFailure() { + if (state == ProcedureState.WAITING_TIMEOUT) { + long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate; + setFailure("ProcedureExecutor", new TimeoutException( + "Operation timed out after " + StringUtils.humanTimeDiff(timeDiff))); + return true; + } + return false; + } + + /** + * Called by the ProcedureExecutor to assign the ID to the newly created procedure. + */ + @VisibleForTesting + @InterfaceAudience.Private + protected void setProcId(final long procId) { + this.procId = procId; + this.startTime = EnvironmentEdgeManager.currentTime(); + setState(ProcedureState.RUNNABLE); + } + + /** + * Called by the ProcedureExecutor to assign the parent to the newly created procedure. + */ + @InterfaceAudience.Private + protected void setParentProcId(final long parentProcId) { + this.parentProcId = parentProcId; + } + + /** + * Internal method called by the ProcedureExecutor that starts the + * user-level code execute(). + */ + @InterfaceAudience.Private + protected Procedure[] doExecute(final TEnvironment env) + throws ProcedureYieldException { + try { + updateTimestamp(); + return execute(env); + } finally { + updateTimestamp(); + } + } + + /** + * Internal method called by the ProcedureExecutor that starts the + * user-level code rollback(). + */ + @InterfaceAudience.Private + protected void doRollback(final TEnvironment env) throws IOException { + try { + updateTimestamp(); + rollback(env); + } finally { + updateTimestamp(); + } + } + + /** + * Called on store load to initialize the Procedure internals after + * the creation/deserialization. + */ + @InterfaceAudience.Private + protected void setStartTime(final long startTime) { + this.startTime = startTime; + } + + /** + * Called on store load to initialize the Procedure internals after + * the creation/deserialization. + */ + private synchronized void setLastUpdate(final long lastUpdate) { + this.lastUpdate = lastUpdate; + } + + protected synchronized void updateTimestamp() { + this.lastUpdate = EnvironmentEdgeManager.currentTime(); + } + + /** + * Called by the ProcedureExecutor on procedure-load to restore the latch state + */ + @InterfaceAudience.Private + protected synchronized void setChildrenLatch(final int numChildren) { + this.childrenLatch = numChildren; + } + + /** + * Called by the ProcedureExecutor on procedure-load to restore the latch state + */ + @InterfaceAudience.Private + protected synchronized void incChildrenLatch() { + // TODO: can this be inferred from the stack? I think so... 
+ this.childrenLatch++; + } + + /** + * Called by the ProcedureExecutor to notify that one of the sub-procedures + * has completed. + */ + @InterfaceAudience.Private + protected synchronized boolean childrenCountDown() { + assert childrenLatch > 0; + return --childrenLatch == 0; + } + + /** + * Called by the RootProcedureState on procedure execution. + * Each procedure store its stack-index positions. + */ + @InterfaceAudience.Private + protected synchronized void addStackIndex(final int index) { + if (stackIndexes == null) { + stackIndexes = new int[] { index }; + } else { + int count = stackIndexes.length; + stackIndexes = Arrays.copyOf(stackIndexes, count + 1); + stackIndexes[count] = index; + } + } + + @InterfaceAudience.Private + protected synchronized boolean removeStackIndex() { + if (stackIndexes.length > 1) { + stackIndexes = Arrays.copyOf(stackIndexes, stackIndexes.length - 1); + return false; + } else { + stackIndexes = null; + return true; + } + } + + /** + * Called on store load to initialize the Procedure internals after + * the creation/deserialization. + */ + @InterfaceAudience.Private + protected synchronized void setStackIndexes(final List stackIndexes) { + this.stackIndexes = new int[stackIndexes.size()]; + for (int i = 0; i < this.stackIndexes.length; ++i) { + this.stackIndexes[i] = stackIndexes.get(i); + } + } + + @InterfaceAudience.Private + protected synchronized boolean wasExecuted() { + return stackIndexes != null; + } + + @InterfaceAudience.Private + protected synchronized int[] getStackIndexes() { + return stackIndexes; + } + + @Override + public int compareTo(final Procedure other) { + long diff = getProcId() - other.getProcId(); + return (diff < 0) ? -1 : (diff > 0) ? 1 : 0; + } + + /* + * Helper to lookup the root Procedure ID given a specified procedure. + */ + @InterfaceAudience.Private + protected static Long getRootProcedureId(final Map procedures, Procedure proc) { + while (proc.hasParent()) { + proc = procedures.get(proc.getParentProcId()); + if (proc == null) return null; + } + return proc.getProcId(); + } + + protected static Procedure newInstance(final String className) throws IOException { + try { + Class clazz = Class.forName(className); + if (!Modifier.isPublic(clazz.getModifiers())) { + throw new Exception("the " + clazz + " class is not public"); + } + + Constructor ctor = clazz.getConstructor(); + assert ctor != null : "no constructor found"; + if (!Modifier.isPublic(ctor.getModifiers())) { + throw new Exception("the " + clazz + " constructor is not public"); + } + return (Procedure)ctor.newInstance(); + } catch (Exception e) { + throw new IOException("The procedure class " + className + + " must be accessible and have an empty constructor", e); + } + } + + protected static void validateClass(final Procedure proc) throws IOException { + try { + Class clazz = proc.getClass(); + if (!Modifier.isPublic(clazz.getModifiers())) { + throw new Exception("the " + clazz + " class is not public"); + } + + Constructor ctor = clazz.getConstructor(); + assert ctor != null; + if (!Modifier.isPublic(ctor.getModifiers())) { + throw new Exception("the " + clazz + " constructor is not public"); + } + } catch (Exception e) { + throw new IOException("The procedure class " + proc.getClass().getName() + + " must be accessible and have an empty constructor", e); + } + } + + /** + * Helper to convert the procedure to protobuf. + * Used by ProcedureStore implementations. 
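A sketch of the round trip a ProcedureStore implementation might perform with the two convert() helpers that follow (illustrative; the surrounding store/WAL plumbing is assumed, and toByteArray()/parseFrom() are the standard protobuf generated-message methods):

    // Persist: runtime Procedure -> protobuf message -> bytes
    ProcedureProtos.Procedure msg = Procedure.convert(proc);
    byte[] onDisk = msg.toByteArray();

    // Reload: bytes -> protobuf message -> runtime Procedure.
    // Class name, ids, state, stack indexes, exception, result and the
    // user's serialized state data are all restored by convert().
    ProcedureProtos.Procedure parsed = ProcedureProtos.Procedure.parseFrom(onDisk);
    Procedure restored = Procedure.convert(parsed);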
+ */ + @InterfaceAudience.Private + public static ProcedureProtos.Procedure convert(final Procedure proc) + throws IOException { + Preconditions.checkArgument(proc != null); + validateClass(proc); + + ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder() + .setClassName(proc.getClass().getName()) + .setProcId(proc.getProcId()) + .setState(proc.getState()) + .setStartTime(proc.getStartTime()) + .setLastUpdate(proc.getLastUpdate()); + + if (proc.hasParent()) { + builder.setParentId(proc.getParentProcId()); + } + + if (proc.hasTimeout()) { + builder.setTimeout(proc.getTimeout()); + } + + if (proc.hasOwner()) { + builder.setOwner(proc.getOwner()); + } + + int[] stackIds = proc.getStackIndexes(); + if (stackIds != null) { + for (int i = 0; i < stackIds.length; ++i) { + builder.addStackId(stackIds[i]); + } + } + + if (proc.hasException()) { + RemoteProcedureException exception = proc.getException(); + builder.setException( + RemoteProcedureException.toProto(exception.getSource(), exception.getCause())); + } + + byte[] result = proc.getResult(); + if (result != null) { + builder.setResult(ByteStringer.wrap(result)); + } + + ByteString.Output stateStream = ByteString.newOutput(); + proc.serializeStateData(stateStream); + if (stateStream.size() > 0) { + builder.setStateData(stateStream.toByteString()); + } + + return builder.build(); + } + + /** + * Helper to convert the protobuf procedure. + * Used by ProcedureStore implementations. + * + * TODO: OPTIMIZATION: some of the field never change during the execution + * (e.g. className, procId, parentId, ...). + * We can split in 'data' and 'state', and the store + * may take advantage of it by storing the data only on insert(). + */ + @InterfaceAudience.Private + public static Procedure convert(final ProcedureProtos.Procedure proto) + throws IOException { + // Procedure from class name + Procedure proc = Procedure.newInstance(proto.getClassName()); + + // set fields + proc.setProcId(proto.getProcId()); + proc.setState(proto.getState()); + proc.setStartTime(proto.getStartTime()); + proc.setLastUpdate(proto.getLastUpdate()); + + if (proto.hasParentId()) { + proc.setParentProcId(proto.getParentId()); + } + + if (proto.hasOwner()) { + proc.setOwner(proto.getOwner()); + } + + if (proto.hasTimeout()) { + proc.setTimeout(proto.getTimeout()); + } + + if (proto.getStackIdCount() > 0) { + proc.setStackIndexes(proto.getStackIdList()); + } + + if (proto.hasException()) { + assert proc.getState() == ProcedureState.FINISHED || + proc.getState() == ProcedureState.ROLLEDBACK : + "The procedure must be failed (waiting to rollback) or rolledback"; + proc.setFailure(RemoteProcedureException.fromProto(proto.getException())); + } + + if (proto.hasResult()) { + proc.setResult(proto.getResult().toByteArray()); + } + + // we want to call deserialize even when the stream is empty, mainly for testing. + proc.deserializeStateData(proto.getStateData().newInput()); + + return proc; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java new file mode 100644 index 0000000..2e409cf --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Thrown when a procedure is aborted + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ProcedureAbortedException extends ProcedureException { + /** default constructor */ + public ProcedureAbortedException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public ProcedureAbortedException(String s) { + super(s); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java new file mode 100644 index 0000000..9f922b1 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ProcedureException extends IOException { + /** default constructor */ + public ProcedureException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public ProcedureException(String s) { + super(s); + } + + public ProcedureException(Throwable t) { + super(t); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java new file mode 100644 index 0000000..8588315 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -0,0 +1,1077 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.HashSet; +import java.util.TreeSet; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue; +import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Pair; + +import com.google.common.base.Preconditions; + +/** + * Thread Pool that executes the submitted procedures. + * The executor has a ProcedureStore associated. + * Each operation is logged and on restart the pending procedures are resumed. + * + * Unless the Procedure code throws an error (e.g. invalid user input) + * the procedure will complete (at some point in time), On restart the pending + * procedures are resumed and the once failed will be rolledback. 
+ * + * The user can add procedures to the executor via submitProcedure(proc) + * check for the finished state via isFinished(procId) + * and get the result via getResult(procId) + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureExecutor { + private static final Log LOG = LogFactory.getLog(ProcedureExecutor.class); + + Testing testing = null; + public static class Testing { + protected boolean killBeforeStoreUpdate = false; + protected boolean toggleKillBeforeStoreUpdate = false; + + protected boolean shouldKillBeforeStoreUpdate() { + final boolean kill = this.killBeforeStoreUpdate; + if (this.toggleKillBeforeStoreUpdate) { + this.killBeforeStoreUpdate = !kill; + LOG.warn("Toggle Kill before store update to: " + this.killBeforeStoreUpdate); + } + return kill; + } + } + + public interface ProcedureExecutorListener { + void procedureLoaded(long procId); + void procedureAdded(long procId); + void procedureFinished(long procId); + } + + /** + * Used by the TimeoutBlockingQueue to get the timeout interval of the procedure + */ + private static class ProcedureTimeoutRetriever implements TimeoutRetriever { + @Override + public long getTimeout(Procedure proc) { + return proc.getTimeRemaining(); + } + + @Override + public TimeUnit getTimeUnit(Procedure proc) { + return TimeUnit.MILLISECONDS; + } + } + + /** + * Internal cleaner that removes the completed procedure results after a TTL. + * NOTE: This is a special case handled in timeoutLoop(). + * + * Since the client code looks more or less like: + * procId = master.doOperation() + * while (master.getProcResult(procId) == ProcInProgress); + * The master should not throw away the proc result as soon as the procedure is done + * but should wait a result request from the client (see executor.removeResult(procId)) + * The client will call something like master.isProcDone() or master.getProcResult() + * which will return the result/state to the client, and it will mark the completed + * proc as ready to delete. note that the client may not receive the response from + * the master (e.g. master failover) so, if we delay a bit the real deletion of + * the proc result the client will be able to get the result the next try. 
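A sketch of the client-side pattern described above. The procedure class is the hypothetical one from the earlier sketch and Thread.sleep() stands in for whatever retry/backoff the client uses; submitProcedure(), isFinished(), getResult() and removeResult() are the executor methods defined later in this class.

    long procId = executor.submitProcedure(new CreateMarkerProcedure());
    while (!executor.isFinished(procId)) {
      Thread.sleep(100);            // client keeps polling until the root procedure completes
    }
    ProcedureResult result = executor.getResult(procId);
    // ... inspect the result (success payload or the remote exception) ...
    executor.removeResult(procId);  // ack the result so the cleaner can evict it once its TTL expires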
+ */ + private static class CompletedProcedureCleaner extends Procedure { + private static final Log LOG = LogFactory.getLog(CompletedProcedureCleaner.class); + + private static final String CLEANER_INTERVAL_CONF_KEY = "hbase.procedure.cleaner.interval"; + private static final int DEFAULT_CLEANER_INTERVAL = 30 * 1000; // 30sec + + private static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl"; + private static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min + + private static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl"; + private static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min + + private final Map completed; + private final ProcedureStore store; + private final Configuration conf; + + public CompletedProcedureCleaner(final Configuration conf, final ProcedureStore store, + final Map completedMap) { + // set the timeout interval that triggers the periodic-procedure + setTimeout(conf.getInt(CLEANER_INTERVAL_CONF_KEY, DEFAULT_CLEANER_INTERVAL)); + this.completed = completedMap; + this.store = store; + this.conf = conf; + } + + public void periodicExecute(final TEnvironment env) { + if (completed.isEmpty()) { + LOG.debug("no completed procedures to cleanup"); + return; + } + + final long evictTtl = conf.getInt(EVICT_TTL_CONF_KEY, DEFAULT_EVICT_TTL); + final long evictAckTtl = conf.getInt(EVICT_ACKED_TTL_CONF_KEY, DEFAULT_ACKED_EVICT_TTL); + + long now = EnvironmentEdgeManager.currentTime(); + Iterator> it = completed.entrySet().iterator(); + while (it.hasNext() && store.isRunning()) { + Map.Entry entry = it.next(); + ProcedureResult result = entry.getValue(); + + // TODO: Select TTL based on Procedure type + if ((result.hasClientAckTime() && (now - result.getClientAckTime()) >= evictAckTtl) || + (now - result.getLastUpdate()) >= evictTtl) { + LOG.debug("Evict completed procedure " + entry.getKey()); + store.delete(entry.getKey()); + it.remove(); + } + } + } + + @Override + protected Procedure[] execute(final TEnvironment env) { + throw new UnsupportedOperationException(); + } + + @Override + protected void rollback(final TEnvironment env) { + throw new UnsupportedOperationException(); + } + + @Override + protected boolean abort(final TEnvironment env) { + throw new UnsupportedOperationException(); + } + + @Override + public void serializeStateData(final OutputStream stream) { + throw new UnsupportedOperationException(); + } + + @Override + public void deserializeStateData(final InputStream stream) { + throw new UnsupportedOperationException(); + } + } + + /** + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the ProcedureResult. + * Once a Root-Procedure completes (success or failure), the result will be added to this map. + * The user of ProcedureExecutor should call getResult(procId) to get the result. + */ + private final ConcurrentHashMap completed = + new ConcurrentHashMap(); + + /** + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. + * The RootProcedureState contains the execution stack of the Root-Procedure, + * It is added to the map by submitProcedure() and removed on procedure completion. + */ + private final ConcurrentHashMap rollbackStack = + new ConcurrentHashMap(); + + /** + * Helper map to lookup the live procedures by ID. + * This map contains every procedure. root-procedures and subprocedures. 
+ */ + private final ConcurrentHashMap procedures = + new ConcurrentHashMap(); + + /** + * Timeout Queue that contains Procedures in a WAITING_TIMEOUT state + * or periodic procedures. + */ + private final TimeoutBlockingQueue waitingTimeout = + new TimeoutBlockingQueue(new ProcedureTimeoutRetriever()); + + /** + * Queue that contains runnable procedures. + */ + private final ProcedureRunnableSet runnables; + + // TODO + private final ReentrantLock submitLock = new ReentrantLock(); + private final AtomicLong lastProcId = new AtomicLong(-1); + + private final CopyOnWriteArrayList listeners = + new CopyOnWriteArrayList(); + + private final AtomicInteger activeExecutorCount = new AtomicInteger(0); + private final AtomicBoolean running = new AtomicBoolean(false); + private final TEnvironment environment; + private final ProcedureStore store; + private final Configuration conf; + + private Thread[] threads; + + public ProcedureExecutor(final Configuration conf, final TEnvironment environment, + final ProcedureStore store) { + this(conf, environment, store, new ProcedureSimpleRunQueue()); + } + + public ProcedureExecutor(final Configuration conf, final TEnvironment environment, + final ProcedureStore store, final ProcedureRunnableSet runqueue) { + this.environment = environment; + this.runnables = runqueue; + this.store = store; + this.conf = conf; + } + + private List> load() throws IOException { + Preconditions.checkArgument(completed.isEmpty()); + Preconditions.checkArgument(rollbackStack.isEmpty()); + Preconditions.checkArgument(procedures.isEmpty()); + Preconditions.checkArgument(waitingTimeout.isEmpty()); + Preconditions.checkArgument(runnables.size() == 0); + + // 1. Load the procedures + Iterator loader = store.load(); + if (loader == null) { + lastProcId.set(0); + return null; + } + + long logMaxProcId = 0; + int runnablesCount = 0; + while (loader.hasNext()) { + Procedure proc = loader.next(); + proc.beforeReplay(getEnvironment()); + procedures.put(proc.getProcId(), proc); + logMaxProcId = Math.max(logMaxProcId, proc.getProcId()); + LOG.debug("Loading procedure state=" + proc.getState() + + " isFailed=" + proc.hasException() + ": " + proc); + if (!proc.hasParent() && !proc.isFinished()) { + rollbackStack.put(proc.getProcId(), new RootProcedureState()); + } + if (proc.getState() == ProcedureState.RUNNABLE) { + runnablesCount++; + } + } + assert lastProcId.get() < 0; + lastProcId.set(logMaxProcId); + + // 2. Initialize the stacks + TreeSet runnableSet = null; + HashSet waitingSet = null; + for (final Procedure proc: procedures.values()) { + Long rootProcId = getRootProcedureId(proc); + if (rootProcId == null) { + // The 'proc' was ready to run but the root procedure was rolledback? 
+ runnables.addBack(proc); + continue; + } + + if (!proc.hasParent() && proc.isFinished()) { + LOG.debug("The procedure is completed state=" + proc.getState() + + " isFailed=" + proc.hasException() + ": " + proc); + assert !rollbackStack.containsKey(proc.getProcId()); + completed.put(proc.getProcId(), newResultFromProcedure(proc)); + continue; + } + + if (proc.hasParent() && !proc.isFinished()) { + Procedure parent = procedures.get(proc.getParentProcId()); + // corrupted procedures are handled later at step 3 + if (parent != null) { + parent.incChildrenLatch(); + } + } + + RootProcedureState procStack = rollbackStack.get(rootProcId); + procStack.loadStack(proc); + + switch (proc.getState()) { + case RUNNABLE: + if (runnableSet == null) { + runnableSet = new TreeSet(); + } + runnableSet.add(proc); + break; + case WAITING_TIMEOUT: + if (waitingSet == null) { + waitingSet = new HashSet(); + } + waitingSet.add(proc); + break; + case FINISHED: + if (proc.hasException()) { + // add the proc to the runnables to perform the rollback + runnables.addBack(proc); + break; + } + case ROLLEDBACK: + case INITIALIZING: + String msg = "Unexpected " + proc.getState() + " state for " + proc; + LOG.error(msg); + throw new UnsupportedOperationException(msg); + default: + break; + } + } + + // 3. Validate the stacks + List> corrupted = null; + Iterator> itStack = rollbackStack.entrySet().iterator(); + while (itStack.hasNext()) { + Map.Entry entry = itStack.next(); + RootProcedureState procStack = entry.getValue(); + if (procStack.isValid()) continue; + + for (Procedure proc: procStack.getSubprocedures()) { + procedures.remove(proc.getProcId()); + if (runnableSet != null) runnableSet.remove(proc); + if (waitingSet != null) waitingSet.remove(proc); + } + itStack.remove(); + if (corrupted == null) { + corrupted = new ArrayList>(); + } + corrupted.add(entry); + } + + // 4. Push the runnables + if (runnableSet != null) { + // TODO: See ProcedureWALFormatReader.readInitEntry() some procedure + // may be started way before this stuff. + for (Procedure proc: runnableSet) { + if (!proc.hasParent()) { + sendProcedureLoadedNotification(proc.getProcId()); + } + runnables.addBack(proc); + } + } + return corrupted; + } + + public void start(int numThreads) throws IOException { + if (running.getAndSet(true)) { + LOG.warn("Already running"); + return; + } + + // We have numThreads executor + one timer thread used for timing out + // procedures and triggering periodic procedures. + threads = new Thread[numThreads + 1]; + LOG.info("Starting procedure executor threads=" + threads.length); + + // Initialize procedures executor + for (int i = 0; i < numThreads; ++i) { + threads[i] = new Thread() { + @Override + public void run() { + execLoop(); + } + }; + } + + // Initialize procedures timeout handler (this is the +1 thread) + threads[numThreads] = new Thread() { + @Override + public void run() { + timeoutLoop(); + } + }; + + // Acquire the store lease. + store.recoverLease(); + + // TODO: Split in two steps. + // TODO: Handle corrupted procedure returned (probably just a WARN) + // The first one will make sure that we have the latest id, + // so we can start the threads and accept new procedures. + // The second step will do the actual load of old procedures. + load(); + + // Start the executors. Here we must have the lastProcId set. 
+ for (int i = 0; i < threads.length; ++i) { + threads[i].start(); + } + + // Add completed cleaner + waitingTimeout.add(new CompletedProcedureCleaner(conf, store, completed)); + } + + public void stop() { + if (!running.getAndSet(false)) { + return; + } + + LOG.info("Stopping the procedure executor"); + runnables.signalAll(); + waitingTimeout.signalAll(); + } + + public void join() { + boolean interrupted = false; + + for (int i = 0; i < threads.length; ++i) { + try { + threads[i].join(); + } catch (InterruptedException ex) { + interrupted = true; + } + } + + if (interrupted) { + Thread.currentThread().interrupt(); + } + + completed.clear(); + rollbackStack.clear(); + procedures.clear(); + waitingTimeout.clear(); + runnables.clear(); + lastProcId.set(-1); + } + + public boolean isRunning() { + return running.get(); + } + + /** + * @return the number of execution threads. + */ + public int getNumThreads() { + return threads == null ? 0 : (threads.length - 1); + } + + public int getActiveExecutorCount() { + return activeExecutorCount.get(); + } + + public TEnvironment getEnvironment() { + return this.environment; + } + + public ProcedureStore getStore() { + return this.store; + } + + public void registerListener(ProcedureExecutorListener listener) { + this.listeners.add(listener); + } + + public boolean unregisterListener(ProcedureExecutorListener listener) { + return this.listeners.remove(listener); + } + + /** + * Add a new root-procedure to the executor. + * @param proc the new procedure to execute. + * @return the procedure id, that can be used to monitor the operation + */ + public long submitProcedure(final Procedure proc) { + Preconditions.checkArgument(proc.getState() == ProcedureState.INITIALIZING); + Preconditions.checkArgument(isRunning()); + Preconditions.checkArgument(lastProcId.get() >= 0); + Preconditions.checkArgument(!proc.hasParent()); + + // Initialize the Procedure ID + proc.setProcId(nextProcId()); + + // Commit the transaction + store.insert(proc, null); + LOG.debug("procedure " + proc + " added to the store"); + + // Create the rollback stack for the procedure + RootProcedureState stack = new RootProcedureState(); + rollbackStack.put(proc.getProcId(), stack); + + // Submit the new subprocedures + assert !procedures.containsKey(proc.getProcId()); + procedures.put(proc.getProcId(), proc); + sendProcedureAddedNotification(proc.getProcId()); + runnables.addBack(proc); + return proc.getProcId(); + } + + public ProcedureResult getResult(final long procId) { + return completed.get(procId); + } + + /** + * Return true if the procedure is finished. + * The state may be "completed successfully" or "failed and rolledback". + * Use getResult() to check the state or get the result data. + * @param procId the ID of the procedure to check + * @return true if the procedure execution is finished, otherwise false. + */ + public boolean isFinished(final long procId) { + return completed.containsKey(procId); + } + + /** + * Return true if the procedure is started. + * @param procId the ID of the procedure to check + * @return true if the procedure execution is started, otherwise false. + */ + public boolean isStarted(final long procId) { + Procedure proc = procedures.get(procId); + if (proc == null) { + return completed.get(procId) != null; + } + return proc.wasExecuted(); + } + + /** + * Mark the specified completed procedure, as ready to remove. 
+ * @param procId the ID of the procedure to remove + */ + public void removeResult(final long procId) { + ProcedureResult result = completed.get(procId); + if (result == null) { + assert !procedures.containsKey(procId) : "procId=" + procId + " is still running"; + LOG.debug("Procedure procId=" + procId + " already removed by the cleaner"); + return; + } + + // The CompletedProcedureCleaner will take care of deletion, once the TTL is expired. + result.setClientAckTime(EnvironmentEdgeManager.currentTime()); + } + + /** + * Send an abort notification the specified procedure. + * Depending on the procedure implementation the abort can be considered or ignored. + * @param procId the procedure to abort + * @return true if the procedure exist and has received the abort, otherwise false. + */ + public boolean abort(final long procId) { + Procedure proc = procedures.get(procId); + if (proc != null) { + return proc.abort(getEnvironment()); + } + return false; + } + + public Map getResults() { + return Collections.unmodifiableMap(completed); + } + + public Procedure getProcedure(final long procId) { + return procedures.get(procId); + } + + protected ProcedureRunnableSet getRunnableSet() { + return runnables; + } + + /** + * Execution loop (N threads) + * while the executor is in a running state, + * fetch a procedure from the runnables queue and start the execution. + */ + private void execLoop() { + while (isRunning()) { + Long procId = runnables.poll(); + Procedure proc = procId != null ? procedures.get(procId) : null; + if (proc == null) continue; + + try { + activeExecutorCount.incrementAndGet(); + execLoop(proc); + } finally { + activeExecutorCount.decrementAndGet(); + } + } + } + + private void execLoop(Procedure proc) { + if (LOG.isTraceEnabled()) { + LOG.trace("trying to start the execution of " + proc); + } + + Long rootProcId = getRootProcedureId(proc); + if (rootProcId == null) { + // The 'proc' was ready to run but the root procedure was rolledback + executeRollback(proc); + return; + } + + RootProcedureState procStack = rollbackStack.get(rootProcId); + if (procStack == null) return; + + do { + // Try to acquire the execution + if (!procStack.acquire(proc)) { + if (procStack.setRollback()) { + // we have the 'rollback-lock' we can start rollingback + if (!executeRollback(rootProcId, procStack)) { + procStack.unsetRollback(); + runnables.yield(proc); + } + } else { + // if we can't rollback means that some child is still running. + // the rollback will be executed after all the children are done. + // If the procedure was never executed, remove and mark it as rolledback. + if (!proc.wasExecuted()) { + if (!executeRollback(proc)) { + runnables.yield(proc); + } + } + } + break; + } + + // Execute the procedure + assert proc.getState() == ProcedureState.RUNNABLE; + if (proc.acquireLock(getEnvironment())) { + execProcedure(procStack, proc); + proc.releaseLock(getEnvironment()); + } else { + runnables.yield(proc); + } + procStack.release(proc); + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. 
+ if (testing != null && !isRunning()) { + break; + } + + if (proc.getProcId() == rootProcId && proc.isSuccess()) { + // Finalize the procedure state + LOG.info("Procedure completed in " + + StringUtils.humanTimeDiff(proc.elapsedTime()) + ": " + proc); + procedureFinished(proc); + break; + } + } while (procStack.isFailed()); + } + + private void timeoutLoop() { + while (isRunning()) { + Procedure proc = waitingTimeout.poll(); + if (proc == null) continue; + + if (proc.getTimeRemaining() > 100) { + // got an early wake, maybe a stop? + // re-enqueue the task in case was not a stop or just a signal + waitingTimeout.add(proc); + continue; + } + + // ---------------------------------------------------------------------------- + // TODO-MAYBE: Should we provide a notification to the store with the + // full set of procedures pending and completed to write a compacted + // version of the log (in case is a log)? + // In theory no, procedures are have a short life, so at some point the store + // will have the tracker saying everything is in the last log. + // ---------------------------------------------------------------------------- + + // The CompletedProcedureCleaner is a special case, and it acts as a chore. + // instead of bringing the Chore class in, we reuse this timeout thread for + // this special case. + if (proc instanceof CompletedProcedureCleaner) { + try { + ((CompletedProcedureCleaner)proc).periodicExecute(getEnvironment()); + } catch (Throwable e) { + LOG.error("ignoring CompletedProcedureCleaner exception: " + e.getMessage(), e); + } + proc.setStartTime(EnvironmentEdgeManager.currentTime()); + waitingTimeout.add(proc); + continue; + } + + // The procedure received an "abort-timeout", call abort() and + // add the procedure back in the queue for rollback. + if (proc.setTimeoutFailure()) { + long rootProcId = Procedure.getRootProcedureId(procedures, proc); + RootProcedureState procStack = rollbackStack.get(rootProcId); + procStack.abort(); + store.update(proc); + runnables.addFront(proc); + continue; + } + } + } + + /** + * Execute the rollback of the full procedure stack. + * Once the procedure is rolledback, the root-procedure will be visible as + * finished to user, and the result will be the fatal exception. + */ + private boolean executeRollback(final long rootProcId, final RootProcedureState procStack) { + Procedure rootProc = procedures.get(rootProcId); + RemoteProcedureException exception = rootProc.getException(); + if (exception == null) { + exception = procStack.getException(); + rootProc.setFailure(exception); + store.update(rootProc); + } + + List subprocStack = procStack.getSubprocedures(); + assert subprocStack != null : "called rollback with no steps executed rootProc=" + rootProc; + + int stackTail = subprocStack.size(); + boolean reuseLock = false; + while (stackTail --> 0) { + final Procedure proc = subprocStack.get(stackTail); + + if (!reuseLock && !proc.acquireLock(getEnvironment())) { + // can't take a lock on the procedure, add the root-proc back on the + // queue waiting for the lock availability + return false; + } + + boolean abortRollback = !executeRollback(proc); + abortRollback |= !isRunning() || !store.isRunning(); + + // If the next procedure is the same to this one + // (e.g. 
StateMachineProcedure reuse the same instance) + // we can avoid to lock/unlock each step + reuseLock = stackTail > 0 && (subprocStack.get(stackTail - 1) == proc) && !abortRollback; + if (!reuseLock) { + proc.releaseLock(getEnvironment()); + } + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. + if (abortRollback) { + return false; + } + + subprocStack.remove(stackTail); + } + + // Finalize the procedure state + LOG.info("Rolledback procedure " + rootProc + + " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()) + + " exception=" + exception.getMessage()); + procedureFinished(rootProc); + return true; + } + + /** + * Execute the rollback of the procedure step. + * It updates the store with the new state (stack index) + * or will remove completly the procedure in case it is a child. + */ + private boolean executeRollback(final Procedure proc) { + try { + proc.doRollback(getEnvironment()); + } catch (IOException e) { + LOG.debug("rollback attempt failed for " + proc, e); + return false; + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... + LOG.fatal("CODE-BUG: uncatched runtime exception for procedure: " + proc, e); + } + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. + if (testing != null && testing.shouldKillBeforeStoreUpdate()) { + LOG.debug("TESTING: Kill before store update"); + stop(); + return false; + } + + if (proc.removeStackIndex()) { + proc.setState(ProcedureState.ROLLEDBACK); + if (proc.hasParent()) { + store.delete(proc.getProcId()); + procedures.remove(proc.getProcId()); + } else { + store.update(proc); + } + } else { + store.update(proc); + } + return true; + } + + /** + * Executes the specified procedure + * - calls the doExecute() of the procedure + * - if the procedure execution didn't fail (e.g. invalid user input) + * - ...and returned subprocedures + * - the subprocedures are initialized. + * - the subprocedures are added to the store + * - the subprocedures are added to the runnable queue + * - the procedure is now in a WAITING state, waiting for the subprocedures to complete + * - ...if there are no subprocedure + * - the procedure completed successfully + * - if there is a parent (WAITING) + * - the parent state will be set to RUNNABLE + * - in case of failure + * - the store is updated with the new state + * - the executor (caller of this method) will start the rollback of the procedure + */ + private void execProcedure(final RootProcedureState procStack, final Procedure procedure) { + Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE); + + // Execute the procedure + boolean reExecute = false; + Procedure[] subprocs = null; + do { + reExecute = false; + try { + subprocs = procedure.doExecute(getEnvironment()); + if (subprocs != null && subprocs.length == 0) { + subprocs = null; + } + } catch (ProcedureYieldException e) { + if (LOG.isTraceEnabled()) { + LOG.trace("yield procedure: " + procedure); + } + runnables.yield(procedure); + return; + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... 
+ String msg = "CODE-BUG: uncatched runtime exception for procedure: " + procedure; + LOG.error(msg, e); + procedure.setFailure(new RemoteProcedureException(msg, e)); + } + + if (!procedure.isFailed()) { + if (subprocs != null) { + if (subprocs.length == 1 && subprocs[0] == procedure) { + // quick-shortcut for a state machine like procedure + subprocs = null; + reExecute = true; + } else { + // yield the current procedure, and make the subprocedure runnable + for (int i = 0; i < subprocs.length; ++i) { + Procedure subproc = subprocs[i]; + if (subproc == null) { + String msg = "subproc[" + i + "] is null, aborting the procedure"; + procedure.setFailure(new RemoteProcedureException(msg, + new IllegalArgumentException(msg))); + subprocs = null; + break; + } + + assert subproc.getState() == ProcedureState.INITIALIZING; + subproc.setParentProcId(procedure.getProcId()); + subproc.setProcId(nextProcId()); + } + + if (!procedure.isFailed()) { + procedure.setChildrenLatch(subprocs.length); + switch (procedure.getState()) { + case RUNNABLE: + procedure.setState(ProcedureState.WAITING); + break; + case WAITING_TIMEOUT: + waitingTimeout.add(procedure); + break; + default: + break; + } + } + } + } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { + waitingTimeout.add(procedure); + } else { + // No subtask, so we are done + procedure.setState(ProcedureState.FINISHED); + } + } + + // Add the procedure to the stack + procStack.addRollbackStep(procedure); + + // allows to kill the executor before something is stored to the wal. + // useful to test the procedure recovery. + if (testing != null && testing.shouldKillBeforeStoreUpdate()) { + LOG.debug("TESTING: Kill before store update"); + stop(); + return; + } + + // Commit the transaction + if (subprocs != null && !procedure.isFailed()) { + if (LOG.isTraceEnabled()) { + LOG.trace("store add " + procedure + " children " + Arrays.toString(subprocs)); + } + store.insert(procedure, subprocs); + } else { + if (LOG.isTraceEnabled()) { + LOG.trace("store update " + procedure); + } + store.update(procedure); + } + + // if the store is not running we are aborting + if (!store.isRunning()) { + return; + } + + assert (reExecute && subprocs == null) || !reExecute; + } while (reExecute); + + // Submit the new subprocedures + if (subprocs != null && !procedure.isFailed()) { + for (int i = 0; i < subprocs.length; ++i) { + Procedure subproc = subprocs[i]; + assert !procedures.containsKey(subproc.getProcId()); + procedures.put(subproc.getProcId(), subproc); + runnables.addFront(subproc); + } + } + + if (procedure.isFinished() && procedure.hasParent()) { + Procedure parent = procedures.get(procedure.getParentProcId()); + if (parent == null) { + assert procStack.isRollingback(); + return; + } + + // If this procedure is the last child awake the parent procedure + if (LOG.isTraceEnabled()) { + LOG.trace(parent + " child is done: " + procedure); + } + if (parent.childrenCountDown() && parent.getState() == ProcedureState.WAITING) { + parent.setState(ProcedureState.RUNNABLE); + store.update(parent); + runnables.addFront(parent); + if (LOG.isTraceEnabled()) { + LOG.trace(parent + " all the children finished their work, resume."); + } + return; + } + } + } + + private void sendProcedureLoadedNotification(final long procId) { + if (!this.listeners.isEmpty()) { + for (ProcedureExecutorListener listener: this.listeners) { + try { + listener.procedureLoaded(procId); + } catch (Throwable e) { + LOG.error("the listener " + listener + " had an error: " + e.getMessage(), 
e); + } + } + } + } + + private void sendProcedureAddedNotification(final long procId) { + if (!this.listeners.isEmpty()) { + for (ProcedureExecutorListener listener: this.listeners) { + try { + listener.procedureAdded(procId); + } catch (Throwable e) { + LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e); + } + } + } + } + + private void sendProcedureFinishedNotification(final long procId) { + if (!this.listeners.isEmpty()) { + for (ProcedureExecutorListener listener: this.listeners) { + try { + listener.procedureFinished(procId); + } catch (Throwable e) { + LOG.error("the listener " + listener + " had an error: " + e.getMessage(), e); + } + } + } + } + + private long nextProcId() { + long procId = lastProcId.incrementAndGet(); + if (procId < 0) { + while (!lastProcId.compareAndSet(procId, 0)) { + procId = lastProcId.get(); + if (procId >= 0) + break; + } + while (procedures.containsKey(procId)) { + procId = lastProcId.incrementAndGet(); + } + } + return procId; + } + + private Long getRootProcedureId(Procedure proc) { + return Procedure.getRootProcedureId(procedures, proc); + } + + private void procedureFinished(final Procedure proc) { + // call the procedure completion cleanup handler + try { + proc.completionCleanup(getEnvironment()); + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... + LOG.error("CODE-BUG: uncatched runtime exception for procedure: " + proc, e); + } + + // update the executor internal state maps + completed.put(proc.getProcId(), newResultFromProcedure(proc)); + rollbackStack.remove(proc.getProcId()); + procedures.remove(proc.getProcId()); + + // call the runnableSet completion cleanup handler + try { + runnables.completionCleanup(proc); + } catch (Throwable e) { + // Catch NullPointerExceptions or similar errors... + LOG.error("CODE-BUG: uncatched runtime exception for runnableSet: " + runnables, e); + } + + // Notify the listeners + sendProcedureFinishedNotification(proc.getProcId()); + } + + public Pair getResultOrProcedure(final long procId) { + ProcedureResult result = completed.get(procId); + Procedure proc = null; + if (result == null) { + proc = procedures.get(procId); + if (proc == null) { + result = completed.get(procId); + } + } + return new Pair(result, proc); + } + + private static ProcedureResult newResultFromProcedure(final Procedure proc) { + if (proc.isFailed()) { + return new ProcedureResult(proc.getStartTime(), proc.getLastUpdate(), proc.getException()); + } + return new ProcedureResult(proc.getStartTime(), proc.getLastUpdate(), proc.getResult()); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java new file mode 100644 index 0000000..d8252b1 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureFairRunQueues.java @@ -0,0 +1,172 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.util.Map; + +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.ConcurrentSkipListMap; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * This class is a container of queues that allows to select a queue + * in a round robin fashion, considering priority of the queue. + * + * the quantum is just how many poll() will return the same object. + * e.g. if quantum is 1 and you have A and B as object you'll get: A B A B + * e.g. if quantum is 2 and you have A and B as object you'll get: A A B B A A B B + * then the object priority is just a priority * quantum + * + * Example: + * - three queues (A, B, C) with priorities (1, 1, 2) + * - The first poll() will return A + * - The second poll() will return B + * - The third and forth poll() will return C + * - and so on again and again. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureFairRunQueues { + private ConcurrentSkipListMap objMap = + new ConcurrentSkipListMap(); + + private final ReentrantLock lock = new ReentrantLock(); + private final int quantum; + + private Map.Entry current = null; + private int currentQuantum = 0; + + public interface FairObject { + boolean isAvailable(); + int getPriority(); + } + + /** + * @param quantum how many poll() will return the same object. + */ + public ProcedureFairRunQueues(final int quantum) { + this.quantum = quantum; + } + + public TQueue get(final TKey key) { + return objMap.get(key); + } + + public TQueue add(final TKey key, final TQueue queue) { + TQueue oldq = objMap.putIfAbsent(key, queue); + return oldq != null ? 
oldq : queue; + } + + public TQueue remove(final TKey key) { + TQueue queue = objMap.remove(key); + if (queue != null) { + lock.lock(); + try { + if (current != null && queue == current.getValue()) { + currentQuantum = 0; + current = null; + } + } finally { + lock.unlock(); + } + } + return queue; + } + + public void clear() { + lock.lock(); + try { + current = null; + objMap.clear(); + } finally { + lock.unlock(); + } + } + + /** + * @return the next available item if present + */ + public TQueue poll() { + lock.lock(); + try { + TQueue queue; + if (currentQuantum == 0) { + if (nextObject() == null) { + // nothing here + return null; + } + + queue = current.getValue(); + currentQuantum = calculateQuantum(queue) - 1; + } else { + currentQuantum--; + queue = current.getValue(); + } + + if (!queue.isAvailable()) { + Map.Entry last = current; + // Try the next one + do { + if (nextObject() == null) + return null; + } while (current.getValue() != last.getValue() && !current.getValue().isAvailable()); + + queue = current.getValue(); + currentQuantum = calculateQuantum(queue) - 1; + } + + return queue; + } finally { + lock.unlock(); + } + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append('{'); + for (Map.Entry entry: objMap.entrySet()) { + builder.append(entry.getKey()); + builder.append(':'); + builder.append(entry.getValue()); + } + builder.append('}'); + return builder.toString(); + } + + private Map.Entry nextObject() { + Map.Entry next = null; + + // If we have already a key, try the next one + if (current != null) { + next = objMap.higherEntry(current.getKey()); + } + + // if there is no higher key, go back to the first + current = (next != null) ? next : objMap.firstEntry(); + return current; + } + + private int calculateQuantum(final TQueue fairObject) { + // TODO + return Math.max(1, fairObject.getPriority() * quantum); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java new file mode 100644 index 0000000..0aebd5a --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureResult.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Once a Procedure completes the ProcedureExecutor takes all the useful + * information of the procedure (e.g. exception/result) and creates a ProcedureResult. 
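+ * For example, a caller could inspect a result like this (sketch only; procExec and
+ * procId are illustrative names, not part of this class):
+ * <pre>
+ *   ProcedureResult result = procExec.getResult(procId);
+ *   if (result != null) {
+ *     if (result.isFailed()) {
+ *       throw result.getException().unwrapRemoteException();
+ *     }
+ *     byte[] data = result.hasResultData() ? result.getResult() : null;
+ *   }
+ * </pre>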
+ * The user of the Procedure framework will get the procedure result with + * procedureExecutor.getResult(procId) + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ProcedureResult { + private final RemoteProcedureException exception; + private final long lastUpdate; + private final long startTime; + private final byte[] result; + + private long clientAckTime = -1; + + public ProcedureResult(final long startTime, final long lastUpdate, + final RemoteProcedureException exception) { + this.lastUpdate = lastUpdate; + this.startTime = startTime; + this.exception = exception; + this.result = null; + } + + public ProcedureResult(final long startTime, final long lastUpdate, final byte[] result) { + this.lastUpdate = lastUpdate; + this.startTime = startTime; + this.exception = null; + this.result = result; + } + + public boolean isFailed() { + return exception != null; + } + + public RemoteProcedureException getException() { + return exception; + } + + public boolean hasResultData() { + return result != null; + } + + public byte[] getResult() { + return result; + } + + public long getStartTime() { + return startTime; + } + + public long getLastUpdate() { + return lastUpdate; + } + + public long executionTime() { + return lastUpdate - startTime; + } + + public boolean hasClientAckTime() { + return clientAckTime > 0; + } + + public long getClientAckTime() { + return clientAckTime; + } + + @InterfaceAudience.Private + protected void setClientAckTime(final long timestamp) { + this.clientAckTime = timestamp; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java new file mode 100644 index 0000000..2d7ba39 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureRunnableSet.java @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Keep track of the runnable procedures + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public interface ProcedureRunnableSet { + /** + * Inserts the specified element at the front of this queue. + * @param proc the Procedure to add + */ + void addFront(Procedure proc); + + /** + * Inserts the specified element at the end of this queue. + * @param proc the Procedure to add + */ + void addBack(Procedure proc); + + /** + * The procedure can't run at the moment. + * add it back to the queue, giving priority to someone else. 
+ * @param proc the Procedure to add back to the list + */ + void yield(Procedure proc); + + /** + * The procedure in execution completed. + * This can be implemented to perform cleanups. + * @param proc the Procedure that completed the execution. + */ + void completionCleanup(Procedure proc); + + /** + * Fetch one Procedure from the queue + * @return the Procedure ID to execute, or null if nothing present. + */ + Long poll(); + + /** + * In case the class is blocking on poll() waiting for items to be added, + * this method should awake poll() and poll() should return. + */ + void signalAll(); + + /** + * Returns the number of elements in this collection. + * @return the number of elements in this collection. + */ + int size(); + + /** + * Removes all of the elements from this collection. + */ + void clear(); +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java new file mode 100644 index 0000000..7b17fb2 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSimpleRunQueue.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Simple runqueue for the procedures + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureSimpleRunQueue implements ProcedureRunnableSet { + private final Deque runnables = new ArrayDeque(); + private final ReentrantLock lock = new ReentrantLock(); + private final Condition waitCond = lock.newCondition(); + + @Override + public void addFront(final Procedure proc) { + lock.lock(); + try { + runnables.addFirst(proc.getProcId()); + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void addBack(final Procedure proc) { + lock.lock(); + try { + runnables.addLast(proc.getProcId()); + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void yield(final Procedure proc) { + addBack(proc); + } + + @Override + @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") + public Long poll() { + lock.lock(); + try { + if (runnables.isEmpty()) { + waitCond.await(); + if (!runnables.isEmpty()) { + return runnables.pop(); + } + } else { + return runnables.pop(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } finally { + lock.unlock(); + } + return null; + } + + @Override + public void signalAll() { + lock.lock(); + try { + waitCond.signalAll(); + } finally { + lock.unlock(); + } + } + + @Override + public void clear() { + lock.lock(); + try { + runnables.clear(); + } finally { + lock.unlock(); + } + } + + @Override + public int size() { + lock.lock(); + try { + return runnables.size(); + } finally { + lock.unlock(); + } + } + + @Override + public void completionCleanup(Procedure proc) { + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java new file mode 100644 index 0000000..177ff5b --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +// TODO: Not used yet +@InterfaceAudience.Public +@InterfaceStability.Stable +public class ProcedureYieldException extends ProcedureException { + /** default constructor */ + public ProcedureYieldException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public ProcedureYieldException(String s) { + super(s); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java new file mode 100644 index 0000000..6be512d --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; + +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; +import org.apache.hadoop.hbase.util.ForeignExceptionUtil; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * A RemoteProcedureException is an exception from another thread or process. + *
<p>
+ * RemoteProcedureExceptions are sent to 'remote' peers to signal an abort in the face of failures. + * When serialized for transmission we encode using Protobufs to ensure version compatibility. + *
<p>
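+ * A round-trip through the wire format looks roughly like this (sketch only; the
+ * source string and the cause Throwable are illustrative):
+ * <pre>
+ *   byte[] bytes = RemoteProcedureException.serialize("source-host", cause);
+ *   RemoteProcedureException remote = RemoteProcedureException.deserialize(bytes);
+ *   IOException ioe = remote.unwrapRemoteException();
+ * </pre>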
+ * RemoteProcedureException exceptions contain a Throwable as its cause. + * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation + * of the original exception created on original 'remote' source. These ProxyThrowables have their + * their stacks traces and messages overridden to reflect the original 'remote' exception. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +@SuppressWarnings("serial") +public class RemoteProcedureException extends ProcedureException { + + /** + * Name of the throwable's source such as a host or thread name. Must be non-null. + */ + private final String source; + + /** + * Create a new RemoteProcedureException that can be serialized. + * It is assumed that this came form a local source. + * @param source + * @param cause + */ + public RemoteProcedureException(String source, Throwable cause) { + super(cause); + assert source != null; + assert cause != null; + this.source = source; + } + + public String getSource() { + return source; + } + + public IOException unwrapRemoteException() { + if (getCause() instanceof RemoteException) { + return ((RemoteException)getCause()).unwrapRemoteException(); + } + if (getCause() instanceof IOException) { + return (IOException)getCause(); + } + return new IOException(getCause()); + } + + @Override + public String toString() { + String className = getCause().getClass().getName(); + return className + " via " + getSource() + ":" + getLocalizedMessage(); + } + + /** + * Converts a RemoteProcedureException to an array of bytes. + * @param source the name of the external exception source + * @param t the "local" external exception (local) + * @return protobuf serialized version of RemoteProcedureException + */ + public static byte[] serialize(String source, Throwable t) { + return toProto(source, t).toByteArray(); + } + + /** + * Takes a series of bytes and tries to generate an RemoteProcedureException instance for it. + * @param bytes + * @return the ForeignExcpetion instance + * @throws InvalidProtocolBufferException if there was deserialization problem this is thrown. + */ + public static RemoteProcedureException deserialize(byte[] bytes) + throws InvalidProtocolBufferException { + return fromProto(ForeignExceptionMessage.parseFrom(bytes)); + } + + public ForeignExceptionMessage convert() { + return ForeignExceptionUtil.toProtoForeignException(getSource(), getCause()); + } + + public static ForeignExceptionMessage toProto(String source, Throwable t) { + return ForeignExceptionUtil.toProtoForeignException(source, t); + } + + public static RemoteProcedureException fromProto(final ForeignExceptionMessage eem) { + return new RemoteProcedureException(eem.getSource(), ForeignExceptionUtil.toIOException(eem)); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java new file mode 100644 index 0000000..bc1af20 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -0,0 +1,185 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; + +/** + * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". + * A "Root Procedure" is a Procedure without parent, each subprocedure will be + * added to the "Root Procedure" stack (or rollback-stack). + * + * RootProcedureState is used and managed only by the ProcedureExecutor. + * Long rootProcId = getRootProcedureId(proc); + * rollbackStack.get(rootProcId).acquire(proc) + * rollbackStack.get(rootProcId).release(proc) + * ... + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +class RootProcedureState { + private static final Log LOG = LogFactory.getLog(RootProcedureState.class); + + private enum State { + RUNNING, // The Procedure is running or ready to run + FAILED, // The Procedure failed, waiting for the rollback executing + ROLLINGBACK, // The Procedure failed and the execution was rolledback + } + + private ArrayList subprocedures = null; + private State state = State.RUNNING; + private int running = 0; + + public synchronized boolean isFailed() { + switch (state) { + case ROLLINGBACK: + case FAILED: + return true; + default: + break; + } + return false; + } + + public synchronized boolean isRollingback() { + return state == State.ROLLINGBACK; + } + + /** + * Called by the ProcedureExecutor to mark rollback execution + */ + protected synchronized boolean setRollback() { + if (running == 0 && state == State.FAILED) { + state = State.ROLLINGBACK; + return true; + } + return false; + } + + /** + * Called by the ProcedureExecutor to mark rollback execution + */ + protected synchronized void unsetRollback() { + assert state == State.ROLLINGBACK; + state = State.FAILED; + } + + protected synchronized List getSubprocedures() { + return subprocedures; + } + + protected synchronized RemoteProcedureException getException() { + if (subprocedures != null) { + for (Procedure proc: subprocedures) { + if (proc.hasException()) { + return proc.getException(); + } + } + } + return null; + } + + /** + * Called by the ProcedureExecutor to mark the procedure step as running. + */ + protected synchronized boolean acquire(final Procedure proc) { + if (state != State.RUNNING) return false; + + running++; + return true; + } + + /** + * Called by the ProcedureExecutor to mark the procedure step as finished. 
+ */ + protected synchronized void release(final Procedure proc) { + running--; + } + + protected synchronized void abort() { + if (state == State.RUNNING) { + state = State.FAILED; + } + } + + /** + * Called by the ProcedureExecutor after the procedure step is completed, + * to add the step to the rollback list (or procedure stack) + */ + protected synchronized void addRollbackStep(final Procedure proc) { + if (proc.isFailed()) { + state = State.FAILED; + } + if (subprocedures == null) { + subprocedures = new ArrayList(); + } + proc.addStackIndex(subprocedures.size()); + subprocedures.add(proc); + } + + /** + * Called on store load by the ProcedureExecutor to load part of the stack. + * + * Each procedure has its own stack-positions. Which means we have to write + * to the store only the Procedure we executed, and nothing else. + * on load we recreate the full stack by aggregating each procedure stack-positions. + */ + protected synchronized void loadStack(final Procedure proc) { + int[] stackIndexes = proc.getStackIndexes(); + if (stackIndexes != null) { + if (subprocedures == null) { + subprocedures = new ArrayList(); + } + int diff = (1 + stackIndexes[stackIndexes.length - 1]) - subprocedures.size(); + if (diff > 0) { + subprocedures.ensureCapacity(1 + stackIndexes[stackIndexes.length - 1]); + while (diff-- > 0) subprocedures.add(null); + } + for (int i = 0; i < stackIndexes.length; ++i) { + subprocedures.set(stackIndexes[i], proc); + } + } + if (proc.getState() == ProcedureState.ROLLEDBACK) { + state = State.ROLLINGBACK; + } else if (proc.isFailed()) { + state = State.FAILED; + } + } + + /** + * Called on store load by the ProcedureExecutor to validate the procedure stack. + */ + protected synchronized boolean isValid() { + if (subprocedures != null) { + for (Procedure proc: subprocedures) { + if (proc == null) { + return false; + } + } + } + return true; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java new file mode 100644 index 0000000..b4b35f2 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData; + +/** + * A SequentialProcedure describes one step in a procedure chain. 
+ * -> Step 1 -> Step 2 -> Step 3 + * + * The main difference from a base Procedure is that the execute() of a + * SequentialProcedure will be called only once, there will be no second + * execute() call once the child are finished. which means once the child + * of a SequentialProcedure are completed the SequentialProcedure is completed too. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class SequentialProcedure extends Procedure { + private boolean executed = false; + + @Override + protected Procedure[] doExecute(final TEnvironment env) + throws ProcedureYieldException { + updateTimestamp(); + try { + Procedure[] children = !executed ? execute(env) : null; + executed = !executed; + return children; + } finally { + updateTimestamp(); + } + } + + @Override + protected void doRollback(final TEnvironment env) throws IOException { + updateTimestamp(); + if (executed) { + try { + rollback(env); + executed = !executed; + } finally { + updateTimestamp(); + } + } + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + SequentialProcedureData.Builder data = SequentialProcedureData.newBuilder(); + data.setExecuted(executed); + data.build().writeDelimitedTo(stream); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + SequentialProcedureData data = SequentialProcedureData.parseDelimitedFrom(stream); + executed = data.getExecuted(); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java new file mode 100644 index 0000000..eab96e4 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Arrays; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData; + +/** + * Procedure described by a series of steps. + * + * The procedure implementor must have an enum of 'states', describing + * the various step of the procedure. + * Once the procedure is running, the procedure-framework will call executeFromState() + * using the 'state' provided by the user. The first call to executeFromState() + * will be performed with 'state = null'. 
The implementor can jump between + * states using setNextState(MyStateEnum.ordinal()). + * The rollback will call rollbackState() for each state that was executed, in reverse order. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class StateMachineProcedure + extends Procedure { + private int stateCount = 0; + private int[] states = null; + + protected enum Flow { + HAS_MORE_STATE, + NO_MORE_STATE, + } + + /** + * called to perform a single step of the specified 'state' of the procedure + * @param state state to execute + * @return Flow.NO_MORE_STATE if the procedure is completed, + * Flow.HAS_MORE_STATE if there is another step. + */ + protected abstract Flow executeFromState(TEnvironment env, TState state) + throws ProcedureYieldException; + + /** + * called to perform the rollback of the specified state + * @param state state to rollback + * @throws IOException temporary failure, the rollback will retry later + */ + protected abstract void rollbackState(TEnvironment env, TState state) + throws IOException; + + /** + * Convert an ordinal (or state id) to an Enum (or more descriptive) state object. + * @param stateId the ordinal() of the state enum (or state id) + * @return the state enum object + */ + protected abstract TState getState(int stateId); + + /** + * Convert the Enum (or more descriptive) state object to an ordinal (or state id). + * @param state the state enum object + * @return stateId the ordinal() of the state enum (or state id) + */ + protected abstract int getStateId(TState state); + + /** + * Return the initial state object that will be used for the first call to executeFromState(). + * @return the initial state enum object + */ + protected abstract TState getInitialState(); + + /** + * Set the next state for the procedure. + * @param state the state enum object + */ + protected void setNextState(final TState state) { + setNextState(getStateId(state)); + } + + @Override + protected Procedure[] execute(final TEnvironment env) + throws ProcedureYieldException { + updateTimestamp(); + try { + TState state = stateCount > 0 ? getState(states[stateCount-1]) : getInitialState(); + if (stateCount == 0) { + setNextState(getStateId(state)); + } + if (executeFromState(env, state) == Flow.NO_MORE_STATE) { + // completed + return null; + } + return (isWaiting() || isFailed()) ? null : new Procedure[] {this}; + } finally { + updateTimestamp(); + } + } + + @Override + protected void rollback(final TEnvironment env) throws IOException { + try { + updateTimestamp(); + rollbackState(env, stateCount > 0 ? getState(states[stateCount-1]) : getInitialState()); + stateCount--; + } finally { + updateTimestamp(); + } + } + + /** + * Set the next state for the procedure. 
+ * @param stateId the ordinal() of the state enum (or state id) + */ + private void setNextState(final int stateId) { + if (states == null || states.length == stateCount) { + int newCapacity = stateCount + 8; + if (states != null) { + states = Arrays.copyOf(states, newCapacity); + } else { + states = new int[newCapacity]; + } + } + states[stateCount++] = stateId; + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder(); + for (int i = 0; i < stateCount; ++i) { + data.addState(states[i]); + } + data.build().writeDelimitedTo(stream); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + StateMachineProcedureData data = StateMachineProcedureData.parseDelimitedFrom(stream); + stateCount = data.getStateCount(); + if (stateCount > 0) { + states = new int[stateCount]; + for (int i = 0; i < stateCount; ++i) { + states[i] = data.getState(i); + } + } else { + states = null; + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java new file mode 100644 index 0000000..cd6b0a7 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class TwoPhaseProcedure extends Procedure { + // TODO (e.g. used by ACLs/VisibilityTags updates) +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java new file mode 100644 index 0000000..0d1c050 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store; + +import java.io.IOException; +import java.util.Iterator; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.Procedure; + +/** + * The ProcedureStore is used by the executor to persist the state of each procedure execution. + * This allows to resume the execution of pending/in-progress procedures in case + * of machine failure or service shutdown. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface ProcedureStore { + /** + * Store listener interface. + * The main process should register a listener and respond to the store events. + */ + public interface ProcedureStoreListener { + /** + * triggered when the store is not able to write out data. + * the main process should abort. + */ + void abortProcess(); + } + + /** + * Add the listener to the notification list. + * @param listener The AssignmentListener to register + */ + void registerListener(ProcedureStoreListener listener); + + /** + * Remove the listener from the notification list. + * @param listener The AssignmentListener to unregister + * @return true if the listner was in the list and it was removed, otherwise false. + */ + boolean unregisterListener(ProcedureStoreListener listener); + + /** + * Start/Open the procedure store + * @param numThreads + */ + void start(int numThreads) throws IOException; + + /** + * Stop/Close the procedure store + * @param abort true if the stop is an abort + */ + void stop(boolean abort); + + /** + * @return true if the store is running, otherwise false. + */ + boolean isRunning(); + + /** + * @return the number of threads/slots passed to start() + */ + int getNumThreads(); + + /** + * Acquire the lease for the procedure store. + */ + void recoverLease() throws IOException; + + /** + * Load the Procedures in the store. + * @return the set of procedures present in the store + */ + Iterator load() throws IOException; + + /** + * When a procedure is submitted to the executor insert(proc, null) will be called. + * 'proc' has a 'RUNNABLE' state and the initial information required to start up. + * + * When a procedure is executed and it returns children insert(proc, subprocs) will be called. + * 'proc' has a 'WAITING' state and an update state. + * 'subprocs' are the children in 'RUNNABLE' state with the initial information. + * + * @param proc the procedure to serialize and write to the store. + * @param subprocs the newly created child of the proc. + */ + void insert(Procedure proc, Procedure[] subprocs); + + /** + * The specified procedure was executed, + * and the new state should be written to the store. + * @param proc the procedure to serialize and write to the store. + */ + void update(Procedure proc); + + /** + * The specified procId was removed from the executor, + * due to completion, abort or failure. + * The store implementor should remove all the information about the specified procId. + * @param procId the ID of the procedure to remove. 
+ */ + void delete(long procId); +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java new file mode 100644 index 0000000..4e4653a --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java @@ -0,0 +1,540 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Iterator; +import java.util.Map; +import java.util.TreeMap; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; + +/** + * Keeps track of live procedures. + * + * It can be used by the ProcedureStore to identify which procedures are already + * deleted/completed to avoid the deserialization step on restart. + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public class ProcedureStoreTracker { + private final TreeMap map = new TreeMap(); + + private boolean keepDeletes = false; + private boolean partial = false; + + public enum DeleteState { YES, NO, MAYBE } + + public static class BitSetNode { + private final static long WORD_MASK = 0xffffffffffffffffL; + private final static int ADDRESS_BITS_PER_WORD = 6; + private final static int BITS_PER_WORD = 1 << ADDRESS_BITS_PER_WORD; + private final static int MAX_NODE_SIZE = 4 << ADDRESS_BITS_PER_WORD; + + private long[] updated; + private long[] deleted; + private long start; + + public void dump() { + System.out.printf("%06d:%06d min=%d max=%d%n", getStart(), getEnd(), + getMinProcId(), getMaxProcId()); + System.out.println("Update:"); + for (int i = 0; i < updated.length; ++i) { + for (int j = 0; j < BITS_PER_WORD; ++j) { + System.out.print((updated[i] & (1L << j)) != 0 ? "1" : "0"); + } + System.out.println(" " + i); + } + System.out.println(); + System.out.println("Delete:"); + for (int i = 0; i < deleted.length; ++i) { + for (int j = 0; j < BITS_PER_WORD; ++j) { + System.out.print((deleted[i] & (1L << j)) != 0 ? "1" : "0"); + } + System.out.println(" " + i); + } + System.out.println(); + } + + public BitSetNode(final long procId, final boolean partial) { + start = alignDown(procId); + + int count = 2; + updated = new long[count]; + deleted = new long[count]; + for (int i = 0; i < count; ++i) { + updated[i] = 0; + deleted[i] = partial ? 
0 : WORD_MASK; + } + + updateState(procId, false); + } + + protected BitSetNode(final long start, final long[] updated, final long[] deleted) { + this.start = start; + this.updated = updated; + this.deleted = deleted; + } + + public void update(final long procId) { + updateState(procId, false); + } + + public void delete(final long procId) { + updateState(procId, true); + } + + public Long getStart() { + return start; + } + + public Long getEnd() { + return start + (updated.length << ADDRESS_BITS_PER_WORD) - 1; + } + + public boolean contains(final long procId) { + return start <= procId && procId <= getEnd(); + } + + public DeleteState isDeleted(final long procId) { + int bitmapIndex = getBitmapIndex(procId); + int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD; + if (wordIndex >= deleted.length) { + return DeleteState.MAYBE; + } + return (deleted[wordIndex] & (1L << bitmapIndex)) != 0 ? DeleteState.YES : DeleteState.NO; + } + + private boolean isUpdated(final long procId) { + int bitmapIndex = getBitmapIndex(procId); + int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD; + if (wordIndex >= updated.length) { + return false; + } + return (updated[wordIndex] & (1L << bitmapIndex)) != 0; + } + + public boolean isUpdated() { + // TODO: cache the value + for (int i = 0; i < updated.length; ++i) { + long deleteMask = ~deleted[i]; + if ((updated[i] & deleteMask) != (WORD_MASK & deleteMask)) { + return false; + } + } + return true; + } + + public boolean isEmpty() { + // TODO: cache the value + for (int i = 0; i < deleted.length; ++i) { + if (deleted[i] != WORD_MASK) { + return false; + } + } + return true; + } + + public void resetUpdates() { + for (int i = 0; i < updated.length; ++i) { + updated[i] = 0; + } + } + + public void undeleteAll() { + for (int i = 0; i < updated.length; ++i) { + deleted[i] = 0; + } + } + + public ProcedureProtos.ProcedureStoreTracker.TrackerNode convert() { + ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder = + ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); + builder.setStartId(start); + for (int i = 0; i < updated.length; ++i) { + builder.addUpdated(updated[i]); + builder.addDeleted(deleted[i]); + } + return builder.build(); + } + + public static BitSetNode convert(ProcedureProtos.ProcedureStoreTracker.TrackerNode data) { + long start = data.getStartId(); + int size = data.getUpdatedCount(); + long[] updated = new long[size]; + long[] deleted = new long[size]; + for (int i = 0; i < size; ++i) { + updated[i] = data.getUpdated(i); + deleted[i] = data.getDeleted(i); + } + return new BitSetNode(start, updated, deleted); + } + + // ======================================================================== + // Grow/Merge Helpers + // ======================================================================== + public boolean canGrow(final long procId) { + return (procId - start) < MAX_NODE_SIZE; + } + + public boolean canMerge(final BitSetNode rightNode) { + return (start + rightNode.getEnd()) < MAX_NODE_SIZE; + } + + public void grow(final long procId) { + int delta, offset; + + if (procId < start) { + // add to head + long newStart = alignDown(procId); + delta = (int)(start - newStart) >> ADDRESS_BITS_PER_WORD; + offset = delta; + } else { + // Add to tail + long newEnd = alignUp(procId + 1); + delta = (int)(newEnd - getEnd()) >> ADDRESS_BITS_PER_WORD; + offset = 0; + } + + long[] newBitmap; + int oldSize = updated.length; + + newBitmap = new long[oldSize + delta]; + System.arraycopy(updated, 0, newBitmap, offset, oldSize); + updated = 
newBitmap; + + newBitmap = new long[deleted.length + delta]; + System.arraycopy(deleted, 0, newBitmap, offset, oldSize); + deleted = newBitmap; + + for (int i = 0; i < delta; ++i) { + updated[oldSize + i] = 0; + deleted[oldSize + i] = WORD_MASK; + } + } + + public void merge(final BitSetNode rightNode) { + int delta = (int)(rightNode.getEnd() - getEnd()) >> ADDRESS_BITS_PER_WORD; + + long[] newBitmap; + int oldSize = updated.length; + int newSize = (delta - rightNode.updated.length); + int offset = oldSize + newSize; + + newBitmap = new long[oldSize + delta]; + System.arraycopy(updated, 0, newBitmap, 0, oldSize); + System.arraycopy(rightNode.updated, 0, newBitmap, offset, rightNode.updated.length); + updated = newBitmap; + + newBitmap = new long[oldSize + delta]; + System.arraycopy(deleted, 0, newBitmap, 0, oldSize); + System.arraycopy(rightNode.deleted, 0, newBitmap, offset, rightNode.deleted.length); + deleted = newBitmap; + + for (int i = 0; i < newSize; ++i) { + updated[offset + i] = 0; + deleted[offset + i] = WORD_MASK; + } + } + + // ======================================================================== + // Min/Max Helpers + // ======================================================================== + public long getMinProcId() { + long minProcId = start; + for (int i = 0; i < deleted.length; ++i) { + if (deleted[i] == 0) { + return(minProcId); + } + + if (deleted[i] != WORD_MASK) { + for (int j = 0; j < BITS_PER_WORD; ++j) { + if ((deleted[i] & (1L << j)) != 0) { + return minProcId + j; + } + } + } + + minProcId += BITS_PER_WORD; + } + return minProcId; + } + + public long getMaxProcId() { + long maxProcId = getEnd(); + for (int i = deleted.length - 1; i >= 0; --i) { + if (deleted[i] == 0) { + return maxProcId; + } + + if (deleted[i] != WORD_MASK) { + for (int j = BITS_PER_WORD - 1; j >= 0; --j) { + if ((deleted[i] & (1L << j)) == 0) { + return maxProcId - (BITS_PER_WORD - 1 - j); + } + } + } + maxProcId -= BITS_PER_WORD; + } + return maxProcId; + } + + // ======================================================================== + // Bitmap Helpers + // ======================================================================== + private int getBitmapIndex(final long procId) { + return (int)(procId - start); + } + + private void updateState(final long procId, final boolean isDeleted) { + int bitmapIndex = getBitmapIndex(procId); + int wordIndex = bitmapIndex >> ADDRESS_BITS_PER_WORD; + long value = (1L << bitmapIndex); + + if (isDeleted) { + updated[wordIndex] |= value; + deleted[wordIndex] |= value; + } else { + updated[wordIndex] |= value; + deleted[wordIndex] &= ~value; + } + } + + // ======================================================================== + // Helpers + // ======================================================================== + private static long alignUp(final long x) { + return (x + (BITS_PER_WORD - 1)) & -BITS_PER_WORD; + } + + private static long alignDown(final long x) { + return x & -BITS_PER_WORD; + } + } + + public void insert(final Procedure proc, final Procedure[] subprocs) { + insert(proc.getProcId()); + if (subprocs != null) { + for (int i = 0; i < subprocs.length; ++i) { + insert(subprocs[i].getProcId()); + } + } + } + + public void update(final Procedure proc) { + update(proc.getProcId()); + } + + public void insert(long procId) { + BitSetNode node = getOrCreateNode(procId); + node.update(procId); + } + + public void update(long procId) { + Map.Entry entry = map.floorEntry(procId); + assert entry != null : "expected node to update procId=" + 
procId; + + BitSetNode node = entry.getValue(); + assert node.contains(procId); + node.update(procId); + } + + public void delete(long procId) { + Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId); + assert entry != null : "expected node to delete procId=" + procId; + + BitSetNode node = entry.getValue(); + assert node.contains(procId) : "expected procId in the node"; + node.delete(procId); + + if (!keepDeletes && node.isEmpty()) { + // TODO: RESET if (map.size() == 1) + map.remove(entry.getKey()); + } + } + + @InterfaceAudience.Private + public void setDeleted(final long procId, final boolean isDeleted) { + BitSetNode node = getOrCreateNode(procId); + node.updateState(procId, isDeleted); + } + + public void clear() { + this.map.clear(); + } + + public DeleteState isDeleted(long procId) { + Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId); + if (entry != null) { + BitSetNode node = entry.getValue(); + DeleteState state = node.isDeleted(procId); + return partial && !node.isUpdated(procId) ? DeleteState.MAYBE : state; + } + return partial ? DeleteState.MAYBE : DeleteState.YES; + } + + public long getMinProcId() { + // TODO: Cache? + Map.Entry<Long, BitSetNode> entry = map.firstEntry(); + return entry == null ? 0 : entry.getValue().getMinProcId(); + } + + public void setKeepDeletes(boolean keepDeletes) { + this.keepDeletes = keepDeletes; + if (!keepDeletes) { + Iterator<Map.Entry<Long, BitSetNode>> it = map.entrySet().iterator(); + while (it.hasNext()) { + Map.Entry<Long, BitSetNode> entry = it.next(); + if (entry.getValue().isEmpty()) { + it.remove(); + } + } + } + } + + public void setPartialFlag(boolean isPartial) { + this.partial = isPartial; + } + + public boolean isEmpty() { + for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) { + if (entry.getValue().isEmpty() == false) { + return false; + } + } + return true; + } + + public boolean isUpdated() { + for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) { + if (entry.getValue().isUpdated() == false) { + return false; + } + } + return true; + } + + public void resetUpdates() { + for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) { + entry.getValue().resetUpdates(); + } + } + + public void undeleteAll() { + for (Map.Entry<Long, BitSetNode> entry : map.entrySet()) { + entry.getValue().undeleteAll(); + } + } + + private BitSetNode getOrCreateNode(final long procId) { + // can procId fit in the left node?
+ BitSetNode leftNode = null; + boolean leftCanGrow = false; + Map.Entry leftEntry = map.floorEntry(procId); + if (leftEntry != null) { + leftNode = leftEntry.getValue(); + if (leftNode.contains(procId)) { + return leftNode; + } + leftCanGrow = leftNode.canGrow(procId); + } + + BitSetNode rightNode = null; + boolean rightCanGrow = false; + Map.Entry rightEntry = map.ceilingEntry(procId); + if (rightEntry != null) { + rightNode = rightEntry.getValue(); + rightCanGrow = rightNode.canGrow(procId); + if (leftNode != null) { + if (leftNode.canMerge(rightNode)) { + // merge left and right node + return mergeNodes(leftNode, rightNode); + } + + if (leftCanGrow && rightCanGrow) { + if ((procId - leftNode.getEnd()) <= (rightNode.getStart() - procId)) { + // grow the left node + return growNode(leftNode, procId); + } + // grow the right node + return growNode(rightNode, procId); + } + } + } + + // grow the left node + if (leftCanGrow) { + return growNode(leftNode, procId); + } + + // grow the right node + if (rightCanGrow) { + return growNode(rightNode, procId); + } + + // add new node + BitSetNode node = new BitSetNode(procId, partial); + map.put(node.getStart(), node); + return node; + } + + private BitSetNode growNode(BitSetNode node, long procId) { + map.remove(node.getStart()); + node.grow(procId); + map.put(node.getStart(), node); + return node; + } + + private BitSetNode mergeNodes(BitSetNode leftNode, BitSetNode rightNode) { + leftNode.merge(rightNode); + map.remove(rightNode.getStart()); + return leftNode; + } + + public void dump() { + System.out.println("map " + map.size()); + for (Map.Entry entry : map.entrySet()) { + entry.getValue().dump(); + } + } + + public void writeTo(final OutputStream stream) throws IOException { + ProcedureProtos.ProcedureStoreTracker.Builder builder = + ProcedureProtos.ProcedureStoreTracker.newBuilder(); + for (Map.Entry entry : map.entrySet()) { + builder.addNode(entry.getValue().convert()); + } + builder.build().writeDelimitedTo(stream); + } + + public void readFrom(final InputStream stream) throws IOException { + ProcedureProtos.ProcedureStoreTracker data = + ProcedureProtos.ProcedureStoreTracker.parseDelimitedFrom(stream); + map.clear(); + for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode: data.getNodeList()) { + BitSetNode node = BitSetNode.convert(protoNode); + map.put(node.getStart(), node); + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java new file mode 100644 index 0000000..29db3bf --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import org.apache.hadoop.hbase.HBaseIOException; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Thrown when a procedure WAL is corrupted + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class CorruptedWALProcedureStoreException extends HBaseIOException { + /** default constructor */ + public CorruptedWALProcedureStoreException() { + super(); + } + + /** + * Constructor + * @param s message + */ + public CorruptedWALProcedureStoreException(String s) { + super(s); + } +} diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java new file mode 100644 index 0000000..859b3cb --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java @@ -0,0 +1,152 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer; + +/** + * Describes a WAL File + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureWALFile implements Comparable { + private static final Log LOG = LogFactory.getLog(ProcedureWALFile.class); + + private ProcedureWALHeader header; + private FSDataInputStream stream; + private FileStatus logStatus; + private FileSystem fs; + private Path logFile; + private long startPos; + + public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) { + this.fs = fs; + this.logStatus = logStatus; + this.logFile = logStatus.getPath(); + } + + public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos) { + this.fs = fs; + this.logFile = logFile; + this.header = header; + this.startPos = startPos; + } + + public void open() throws IOException { + if (stream == null) { + stream = fs.open(logFile); + } + + if (header == null) { + header = ProcedureWALFormat.readHeader(stream); + startPos = stream.getPos(); + } else { + stream.seek(startPos); + } + } + + public ProcedureWALTrailer readTrailer() throws IOException { + try { + return ProcedureWALFormat.readTrailer(stream, startPos, logStatus.getLen()); + } finally { + stream.seek(startPos); + } + } + + public void readTracker(ProcedureStoreTracker tracker) throws IOException { + ProcedureWALTrailer trailer = readTrailer(); + try { + stream.seek(trailer.getTrackerPos()); + tracker.readFrom(stream); + } finally { + stream.seek(startPos); + } + } + + public void close() { + if (stream == null) return; + try { + stream.close(); + } catch (IOException e) { + LOG.warn("unable to close the wal file: " + logFile, e); + } finally { + stream = null; + } + } + + public FSDataInputStream getStream() { + return stream; + } + + public ProcedureWALHeader getHeader() { + return header; + } + + public boolean isCompacted() { + return header.getType() == ProcedureWALFormat.LOG_TYPE_COMPACTED; + } + + public long getLogId() { + return header.getLogId(); + } + + public long getSize() { + return logStatus.getLen(); + } + + public void removeFile() throws IOException { + close(); + fs.delete(logFile, false); + } + + @Override + public int compareTo(final ProcedureWALFile other) { + long diff = header.getLogId() - other.header.getLogId(); + return (diff < 0) ? -1 : (diff > 0) ? 
1 : 0; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof ProcedureWALFile)) return false; + return compareTo((ProcedureWALFile)o) == 0; + } + + @Override + public int hashCode() { + return logFile.hashCode(); + } + + @Override + public String toString() { + return logFile.toString(); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java new file mode 100644 index 0000000..17432ac --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java @@ -0,0 +1,234 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Iterator; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.hbase.io.util.StreamUtils; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.procedure2.util.ByteSlot; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer; + +import com.google.protobuf.InvalidProtocolBufferException; + +/** + * Helper class that contains the WAL serialization utils. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class ProcedureWALFormat { + static final byte LOG_TYPE_STREAM = 0; + static final byte LOG_TYPE_COMPACTED = 1; + static final byte LOG_TYPE_MAX_VALID = 1; + + static final byte HEADER_VERSION = 1; + static final byte TRAILER_VERSION = 1; + static final long HEADER_MAGIC = 0x31764c4157637250L; + static final long TRAILER_MAGIC = 0x50726357414c7631L; + + @InterfaceAudience.Private + public static class InvalidWALDataException extends IOException { + public InvalidWALDataException(String s) { + super(s); + } + + public InvalidWALDataException(Throwable t) { + super(t); + } + } + + interface Loader { + void removeLog(ProcedureWALFile log); + void markCorruptedWAL(ProcedureWALFile log, IOException e); + } + + private ProcedureWALFormat() {} + + public static Iterator load(final Iterator logs, + final ProcedureStoreTracker tracker, final Loader loader) throws IOException { + ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker); + tracker.setKeepDeletes(true); + try { + while (logs.hasNext()) { + ProcedureWALFile log = logs.next(); + log.open(); + try { + reader.read(log, loader); + } finally { + log.close(); + } + } + // The tracker is now updated with all the procedures read from the logs + tracker.setPartialFlag(false); + tracker.resetUpdates(); + } finally { + tracker.setKeepDeletes(false); + } + // TODO: Write compacted version? + return reader.getProcedures(); + } + + public static void writeHeader(OutputStream stream, ProcedureWALHeader header) + throws IOException { + header.writeDelimitedTo(stream); + } + + /* + * +-----------------+ + * | END OF WAL DATA | <---+ + * +-----------------+ | + * | | | + * | Tracker | | + * | | | + * +-----------------+ | + * | version | | + * +-----------------+ | + * | TRAILER_MAGIC | | + * +-----------------+ | + * | offset |-----+ + * +-----------------+ + */ + public static void writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker) + throws IOException { + long offset = stream.getPos(); + + // Write EOF Entry + ProcedureWALEntry.newBuilder() + .setType(ProcedureWALEntry.Type.EOF) + .build().writeDelimitedTo(stream); + + // Write Tracker + tracker.writeTo(stream); + + stream.write(TRAILER_VERSION); + StreamUtils.writeLong(stream, TRAILER_MAGIC); + StreamUtils.writeLong(stream, offset); + } + + public static ProcedureWALHeader readHeader(InputStream stream) + throws IOException { + ProcedureWALHeader header; + try { + header = ProcedureWALHeader.parseDelimitedFrom(stream); + } catch (InvalidProtocolBufferException e) { + throw new InvalidWALDataException(e); + } + + if (header == null) { + throw new InvalidWALDataException("No data available to read the Header"); + } + + if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) { + throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() + + " expected " + HEADER_VERSION); + } + + if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) { + throw new InvalidWALDataException("Invalid header type. 
got " + header.getType()); + } + + return header; + } + + public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long startPos, long size) + throws IOException { + long trailerPos = size - 17; // Beginning of the Trailer Jump + + if (trailerPos < startPos) { + throw new InvalidWALDataException("Missing trailer: size=" + size + " startPos=" + startPos); + } + + stream.seek(trailerPos); + int version = stream.read(); + if (version != TRAILER_VERSION) { + throw new InvalidWALDataException("Invalid Trailer version. got " + version + + " expected " + TRAILER_VERSION); + } + + long magic = StreamUtils.readLong(stream); + if (magic != TRAILER_MAGIC) { + throw new InvalidWALDataException("Invalid Trailer magic. got " + magic + + " expected " + TRAILER_MAGIC); + } + + long trailerOffset = StreamUtils.readLong(stream); + stream.seek(trailerOffset); + + ProcedureWALEntry entry = readEntry(stream); + if (entry.getType() != ProcedureWALEntry.Type.EOF) { + throw new InvalidWALDataException("Invalid Trailer begin"); + } + + ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder() + .setVersion(version) + .setTrackerPos(stream.getPos()) + .build(); + return trailer; + } + + public static ProcedureWALEntry readEntry(InputStream stream) throws IOException { + return ProcedureWALEntry.parseDelimitedFrom(stream); + } + + public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, + Procedure proc, Procedure[] subprocs) throws IOException { + ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); + builder.setType(type); + builder.addProcedure(Procedure.convert(proc)); + if (subprocs != null) { + for (int i = 0; i < subprocs.length; ++i) { + builder.addProcedure(Procedure.convert(subprocs[i])); + } + } + builder.build().writeDelimitedTo(slot); + } + + public static void writeInsert(ByteSlot slot, Procedure proc) + throws IOException { + writeEntry(slot, ProcedureWALEntry.Type.INIT, proc, null); + } + + public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] subprocs) + throws IOException { + writeEntry(slot, ProcedureWALEntry.Type.INSERT, proc, subprocs); + } + + public static void writeUpdate(ByteSlot slot, Procedure proc) + throws IOException { + writeEntry(slot, ProcedureWALEntry.Type.UPDATE, proc, null); + } + + public static void writeDelete(ByteSlot slot, long procId) + throws IOException { + ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); + builder.setType(ProcedureWALEntry.Type.DELETE); + builder.setProcId(procId); + builder.build().writeDelimitedTo(slot); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java new file mode 100644 index 0000000..a60b8f5 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.util.Iterator; +import java.util.Map; +import java.util.HashMap; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry; + +/** + * Helper class that loads the procedures stored in a WAL + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ProcedureWALFormatReader { + private static final Log LOG = LogFactory.getLog(ProcedureWALFormatReader.class); + + private final ProcedureStoreTracker tracker; + //private final long compactionLogId; + + private final Map procedures = new HashMap(); + private final Map localProcedures = + new HashMap(); + + private long maxProcId = 0; + + public ProcedureWALFormatReader(final ProcedureStoreTracker tracker) { + this.tracker = tracker; + } + + public void read(ProcedureWALFile log, ProcedureWALFormat.Loader loader) throws IOException { + FSDataInputStream stream = log.getStream(); + try { + boolean hasMore = true; + while (hasMore) { + ProcedureWALEntry entry = ProcedureWALFormat.readEntry(stream); + if (entry == null) { + LOG.warn("nothing left to decode. exiting with missing EOF"); + hasMore = false; + break; + } + switch (entry.getType()) { + case INIT: + readInitEntry(entry); + break; + case INSERT: + readInsertEntry(entry); + break; + case UPDATE: + case COMPACT: + readUpdateEntry(entry); + break; + case DELETE: + readDeleteEntry(entry); + break; + case EOF: + hasMore = false; + break; + default: + throw new CorruptedWALProcedureStoreException("Invalid entry: " + entry); + } + } + } catch (IOException e) { + LOG.error("got an exception while reading the procedure WAL: " + log, e); + loader.markCorruptedWAL(log, e); + } + + if (localProcedures.isEmpty()) { + LOG.info("No active entry found in state log " + log + ". 
removing it"); + loader.removeLog(log); + } else { + Iterator> itd = + localProcedures.entrySet().iterator(); + while (itd.hasNext()) { + Map.Entry entry = itd.next(); + itd.remove(); + + // Deserialize the procedure + Procedure proc = Procedure.convert(entry.getValue()); + procedures.put(entry.getKey(), proc); + } + + // TODO: Some procedure may be already runnables (see readInitEntry()) + // (we can also check the "update map" in the log trackers) + } + } + + public Iterator getProcedures() { + return procedures.values().iterator(); + } + + private void loadEntries(final ProcedureWALEntry entry) { + for (ProcedureProtos.Procedure proc: entry.getProcedureList()) { + maxProcId = Math.max(maxProcId, proc.getProcId()); + if (isRequired(proc.getProcId())) { + if (LOG.isTraceEnabled()) { + LOG.trace("read " + entry.getType() + " entry " + proc.getProcId()); + } + localProcedures.put(proc.getProcId(), proc); + tracker.setDeleted(proc.getProcId(), false); + } + } + } + + private void readInitEntry(final ProcedureWALEntry entry) + throws IOException { + assert entry.getProcedureCount() == 1 : "Expected only one procedure"; + // TODO: Make it runnable, before reading other files + loadEntries(entry); + } + + private void readInsertEntry(final ProcedureWALEntry entry) throws IOException { + assert entry.getProcedureCount() >= 1 : "Expected one or more procedures"; + loadEntries(entry); + } + + private void readUpdateEntry(final ProcedureWALEntry entry) throws IOException { + assert entry.getProcedureCount() == 1 : "Expected only one procedure"; + loadEntries(entry); + } + + private void readDeleteEntry(final ProcedureWALEntry entry) throws IOException { + assert entry.getProcedureCount() == 0 : "Expected no procedures"; + assert entry.hasProcId() : "expected ProcID"; + if (LOG.isTraceEnabled()) { + LOG.trace("read delete entry " + entry.getProcId()); + } + maxProcId = Math.max(maxProcId, entry.getProcId()); + localProcedures.remove(entry.getProcId()); + tracker.setDeleted(entry.getProcId(), true); + } + + private boolean isDeleted(final long procId) { + return tracker.isDeleted(procId) == ProcedureStoreTracker.DeleteState.YES; + } + + private boolean isRequired(final long procId) { + return !isDeleted(procId) && !procedures.containsKey(procId); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java new file mode 100644 index 0000000..b548b9e --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -0,0 +1,721 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.IOException; +import java.io.FileNotFoundException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.TimeUnit; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker; +import org.apache.hadoop.hbase.procedure2.util.ByteSlot; +import org.apache.hadoop.hbase.procedure2.util.StringUtils; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader; + +/** + * WAL implementation of the ProcedureStore. + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class WALProcedureStore implements ProcedureStore { + private static final Log LOG = LogFactory.getLog(WALProcedureStore.class); + + public interface LeaseRecovery { + void recoverFileLease(FileSystem fs, Path path) throws IOException; + } + + private static final int MAX_RETRIES_BEFORE_ABORT = 3; + + private static final String SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec"; + private static final int DEFAULT_SYNC_WAIT_MSEC = 100; + + private final CopyOnWriteArrayList listeners = + new CopyOnWriteArrayList(); + + private final LinkedList logs = new LinkedList(); + private final ProcedureStoreTracker storeTracker = new ProcedureStoreTracker(); + private final AtomicBoolean running = new AtomicBoolean(false); + private final ReentrantLock lock = new ReentrantLock(); + private final Condition waitCond = lock.newCondition(); + private final Condition slotCond = lock.newCondition(); + private final Condition syncCond = lock.newCondition(); + + private final LeaseRecovery leaseRecovery; + private final Configuration conf; + private final FileSystem fs; + private final Path logDir; + + private AtomicBoolean inSync = new AtomicBoolean(false); + private ArrayBlockingQueue slotsCache = null; + private Set corruptedLogs = null; + private FSDataOutputStream stream = null; + private long totalSynced = 0; + private long flushLogId = 0; + private int slotIndex = 0; + private Thread syncThread; + private ByteSlot[] slots; + private int syncWaitMsec; + + public WALProcedureStore(final Configuration conf, final FileSystem fs, final Path logDir, + final LeaseRecovery leaseRecovery) { + this.fs = fs; + this.conf = conf; + this.logDir = logDir; + this.leaseRecovery = leaseRecovery; + } + + @Override + public void start(int numSlots) throws IOException { + if (running.getAndSet(true)) { + return; + } + + // Init buffer slots + slots = new 
ByteSlot[numSlots]; + slotsCache = new ArrayBlockingQueue(numSlots, true); + while (slotsCache.remainingCapacity() > 0) { + slotsCache.offer(new ByteSlot()); + } + + // Tunings + syncWaitMsec = conf.getInt(SYNC_WAIT_MSEC_CONF_KEY, DEFAULT_SYNC_WAIT_MSEC); + + // Init sync thread + syncThread = new Thread() { + @Override + public void run() { + while (running.get()) { + try { + syncLoop(); + } catch (IOException e) { + LOG.error("got an exception from the sync-loop", e); + sendAbortProcessSignal(); + } + } + } + }; + syncThread.start(); + } + + @Override + public void stop(boolean abort) { + if (!running.getAndSet(false)) { + return; + } + + LOG.info("Stopping the WAL Procedure Store"); + if (lock.tryLock()) { + try { + waitCond.signalAll(); + } finally { + lock.unlock(); + } + } + + if (!abort) { + try { + syncThread.join(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + // Close the writer + closeStream(); + + // Close the old logs + // they should be already closed, this is just in case the load fails + // and we call start() and then stop() + for (ProcedureWALFile log: logs) { + log.close(); + } + logs.clear(); + } + + @Override + public boolean isRunning() { + return running.get(); + } + + @Override + public int getNumThreads() { + return slots == null ? 0 : slots.length; + } + + public ProcedureStoreTracker getStoreTracker() { + return storeTracker; + } + + @Override + public void registerListener(ProcedureStoreListener listener) { + this.listeners.add(listener); + } + + @Override + public boolean unregisterListener(ProcedureStoreListener listener) { + return this.listeners.remove(listener); + } + + @Override + public void recoverLease() throws IOException { + LOG.info("Starting WAL Procedure Store lease recovery"); + FileStatus[] oldLogs = getLogFiles(); + while (running.get()) { + // Get Log-MaxID and recover lease on old logs + flushLogId = initOldLogs(oldLogs) + 1; + + // Create new state-log + if (!rollWriter(flushLogId)) { + // someone else has already created this log + LOG.debug("someone else has already created log " + flushLogId); + continue; + } + + // We have the lease on the log + oldLogs = getLogFiles(); + if (getMaxLogId(oldLogs) > flushLogId) { + // Someone else created new logs + LOG.debug("someone else created new logs. expected maxLogId < " + flushLogId); + logs.getLast().removeFile(); + continue; + } + + LOG.info("lease acquired flushLogId=" + flushLogId); + break; + } + } + + @Override + public Iterator load() throws IOException { + if (logs.isEmpty()) { + throw new RuntimeException("recoverLease() must be called before loading data"); + } + + // Nothing to do, If we have only the current log. 
+ if (logs.size() == 1) { + LOG.debug("No state logs to replay"); + return null; + } + + // Load the old logs + final ArrayList toRemove = new ArrayList(); + Iterator it = logs.descendingIterator(); + it.next(); // Skip the current log + try { + return ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() { + @Override + public void removeLog(ProcedureWALFile log) { + toRemove.add(log); + } + + @Override + public void markCorruptedWAL(ProcedureWALFile log, IOException e) { + if (corruptedLogs == null) { + corruptedLogs = new HashSet(); + } + corruptedLogs.add(log); + // TODO: sideline corrupted log + } + }); + } finally { + if (!toRemove.isEmpty()) { + for (ProcedureWALFile log: toRemove) { + removeLogFile(log); + } + } + } + } + + @Override + public void insert(final Procedure proc, final Procedure[] subprocs) { + if (LOG.isTraceEnabled()) { + LOG.trace("insert " + proc + " subproc=" + Arrays.toString(subprocs)); + } + + ByteSlot slot = acquireSlot(); + long logId = -1; + try { + // Serialize the insert + if (subprocs != null) { + ProcedureWALFormat.writeInsert(slot, proc, subprocs); + } else { + assert !proc.hasParent(); + ProcedureWALFormat.writeInsert(slot, proc); + } + + // Push the transaction data and wait until it is persisted + logId = pushData(slot); + } catch (IOException e) { + // We are not able to serialize the procedure. + // this is a code error, and we are not able to go on. + LOG.fatal("Unable to serialize one of the procedure: proc=" + proc + + " subprocs=" + Arrays.toString(subprocs), e); + throw new RuntimeException(e); + } finally { + releaseSlot(slot); + } + + // Update the store tracker + synchronized (storeTracker) { + if (logId == flushLogId) { + storeTracker.insert(proc, subprocs); + } + } + } + + @Override + public void update(final Procedure proc) { + if (LOG.isTraceEnabled()) { + LOG.trace("update " + proc); + } + + ByteSlot slot = acquireSlot(); + long logId = -1; + try { + // Serialize the update + ProcedureWALFormat.writeUpdate(slot, proc); + + // Push the transaction data and wait until it is persisted + logId = pushData(slot); + } catch (IOException e) { + // We are not able to serialize the procedure. + // this is a code error, and we are not able to go on. + LOG.fatal("Unable to serialize the procedure: " + proc, e); + throw new RuntimeException(e); + } finally { + releaseSlot(slot); + } + + // Update the store tracker + boolean removeOldLogs = false; + synchronized (storeTracker) { + if (logId == flushLogId) { + storeTracker.update(proc); + removeOldLogs = storeTracker.isUpdated(); + } + } + + if (removeOldLogs) { + removeAllLogs(logId - 1); + } + } + + @Override + public void delete(final long procId) { + if (LOG.isTraceEnabled()) { + LOG.trace("delete " + procId); + } + + ByteSlot slot = acquireSlot(); + long logId = -1; + try { + // Serialize the delete + ProcedureWALFormat.writeDelete(slot, procId); + + // Push the transaction data and wait until it is persisted + logId = pushData(slot); + } catch (IOException e) { + // We are not able to serialize the procedure. + // this is a code error, and we are not able to go on. 
+ LOG.fatal("Unable to serialize the procedure: " + procId, e); + throw new RuntimeException(e); + } finally { + releaseSlot(slot); + } + + boolean removeOldLogs = false; + synchronized (storeTracker) { + if (logId == flushLogId) { + storeTracker.delete(procId); + if (storeTracker.isEmpty()) { + removeOldLogs = rollWriterOrDie(logId + 1); + } + } + } + + if (removeOldLogs) { + removeAllLogs(logId); + } + } + + private ByteSlot acquireSlot() { + ByteSlot slot = slotsCache.poll(); + return slot != null ? slot : new ByteSlot(); + } + + private void releaseSlot(final ByteSlot slot) { + slot.reset(); + slotsCache.offer(slot); + } + + private long pushData(final ByteSlot slot) { + assert !logs.isEmpty() : "recoverLease() must be called before inserting data"; + long logId = -1; + + lock.lock(); + try { + // Wait for the sync to be completed + while (true) { + if (inSync.get()) { + syncCond.await(); + } else if (slotIndex == slots.length) { + slotCond.signal(); + syncCond.await(); + } else { + break; + } + } + + slots[slotIndex++] = slot; + logId = flushLogId; + + // Notify that there is new data + if (slotIndex == 1) { + waitCond.signal(); + } + + // Notify that the slots are full + if (slotIndex == slots.length) { + slotCond.signal(); + } + syncCond.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + sendAbortProcessSignal(); + } finally { + lock.unlock(); + } + return logId; + } + + private void syncLoop() throws IOException { + inSync.set(false); + while (running.get()) { + lock.lock(); + try { + // Wait until new data is available + if (slotIndex == 0) { + if (LOG.isTraceEnabled()) { + LOG.trace("Waiting for data. flushed=" + StringUtils.humanSize(totalSynced)); + } + waitCond.await(); + if (slotIndex == 0) { + // no data.. 
probably a stop() + continue; + } + } + + // Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing + slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS); + + inSync.set(true); + totalSynced += syncSlots(); + slotIndex = 0; + inSync.set(false); + syncCond.signalAll(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + sendAbortProcessSignal(); + } finally { + lock.unlock(); + } + } + } + + private long syncSlots() { + int retry = 0; + long totalSynced = 0; + do { + try { + totalSynced = syncSlots(stream, slots, 0, slotIndex); + break; + } catch (IOException e) { + if (++retry == MAX_RETRIES_BEFORE_ABORT) { + LOG.error("sync slot failed, abort.", e); + sendAbortProcessSignal(); + } + } + } while (running.get()); + return totalSynced; + } + + protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count) + throws IOException { + long totalSynced = 0; + for (int i = 0; i < count; ++i) { + ByteSlot data = slots[offset + i]; + data.writeTo(stream); + totalSynced += data.size(); + } + stream.hsync(); + if (LOG.isTraceEnabled()) { + LOG.trace("Sync slots=" + count + '/' + slots.length + + " flushed=" + StringUtils.humanSize(totalSynced)); + } + return totalSynced; + } + + private void sendAbortProcessSignal() { + if (!this.listeners.isEmpty()) { + for (ProcedureStoreListener listener : this.listeners) { + listener.abortProcess(); + } + } + } + + private boolean rollWriterOrDie(final long logId) { + try { + return rollWriter(logId); + } catch (IOException e) { + LOG.warn("Unable to roll the log", e); + sendAbortProcessSignal(); + return false; + } + } + + private boolean rollWriter(final long logId) throws IOException { + ProcedureWALHeader header = ProcedureWALHeader.newBuilder() + .setVersion(ProcedureWALFormat.HEADER_VERSION) + .setType(ProcedureWALFormat.LOG_TYPE_STREAM) + .setMinProcId(storeTracker.getMinProcId()) + .setLogId(logId) + .build(); + + FSDataOutputStream newStream = null; + Path newLogFile = null; + long startPos = -1; + try { + newLogFile = getLogFilePath(logId); + newStream = fs.create(newLogFile, false); + ProcedureWALFormat.writeHeader(newStream, header); + startPos = newStream.getPos(); + } catch (FileAlreadyExistsException e) { + LOG.error("Log file with id=" + logId + " already exists", e); + return false; + } + lock.lock(); + try { + closeStream(); + synchronized (storeTracker) { + storeTracker.resetUpdates(); + } + stream = newStream; + flushLogId = logId; + totalSynced = 0; + logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos)); + } finally { + lock.unlock(); + } + LOG.info("Roll new state log: " + logId); + return true; + } + + private void closeStream() { + try { + if (stream != null) { + try { + ProcedureWALFormat.writeTrailer(stream, storeTracker); + } catch (IOException e) { + LOG.warn("Unable to write the trailer: " + e.getMessage()); + } + stream.close(); + } + } catch (IOException e) { + LOG.error("Unable to close the stream", e); + } finally { + stream = null; + } + } + + private void removeAllLogs(long lastLogId) { + LOG.info("Remove all state logs with ID less then " + lastLogId); + while (!logs.isEmpty()) { + ProcedureWALFile log = logs.getFirst(); + if (lastLogId < log.getLogId()) { + break; + } + + removeLogFile(log); + } + } + + private boolean removeLogFile(final ProcedureWALFile log) { + try { + LOG.debug("remove log: " + log); + log.removeFile(); + logs.remove(log); + } catch (IOException e) { + LOG.error("unable to remove log " + log, e); + return false; + } + return true; 
+ } + + public Set getCorruptedLogs() { + return corruptedLogs; + } + + // ========================================================================== + // FileSystem Log Files helpers + // ========================================================================== + public Path getLogDir() { + return this.logDir; + } + + public FileSystem getFileSystem() { + return this.fs; + } + + protected Path getLogFilePath(final long logId) throws IOException { + return new Path(logDir, String.format("state-%020d.log", logId)); + } + + private static long getLogIdFromName(final String name) { + int end = name.lastIndexOf(".log"); + int start = name.lastIndexOf('-') + 1; + while (start < end) { + if (name.charAt(start) != '0') + break; + start++; + } + return Long.parseLong(name.substring(start, end)); + } + + private FileStatus[] getLogFiles() throws IOException { + try { + return fs.listStatus(logDir, new PathFilter() { + @Override + public boolean accept(Path path) { + String name = path.getName(); + return name.startsWith("state-") && name.endsWith(".log"); + } + }); + } catch (FileNotFoundException e) { + LOG.warn("log directory not found: " + e.getMessage()); + return null; + } + } + + private long getMaxLogId(final FileStatus[] logFiles) { + long maxLogId = 0; + if (logFiles != null && logFiles.length > 0) { + for (int i = 0; i < logFiles.length; ++i) { + maxLogId = Math.max(maxLogId, getLogIdFromName(logFiles[i].getPath().getName())); + } + } + return maxLogId; + } + + /** + * @return Max-LogID of the specified log file set + */ + private long initOldLogs(final FileStatus[] logFiles) throws IOException { + this.logs.clear(); + + long maxLogId = 0; + if (logFiles != null && logFiles.length > 0) { + for (int i = 0; i < logFiles.length; ++i) { + final Path logPath = logFiles[i].getPath(); + leaseRecovery.recoverFileLease(fs, logPath); + maxLogId = Math.max(maxLogId, getLogIdFromName(logPath.getName())); + + ProcedureWALFile log = initOldLog(logFiles[i]); + if (log != null) { + this.logs.add(log); + } + } + Collections.sort(this.logs); + initTrackerFromOldLogs(); + } + return maxLogId; + } + + private void initTrackerFromOldLogs() { + // TODO: Load the most recent tracker available + if (!logs.isEmpty()) { + ProcedureWALFile log = logs.getLast(); + try { + log.readTracker(storeTracker); + } catch (IOException e) { + LOG.error("Unable to read tracker for " + log, e); + // try the next one... 
+ storeTracker.clear(); + storeTracker.setPartialFlag(true); + } + } + } + + private ProcedureWALFile initOldLog(final FileStatus logFile) throws IOException { + ProcedureWALFile log = new ProcedureWALFile(fs, logFile); + if (logFile.getLen() == 0) { + LOG.warn("Remove uninitialized log " + logFile); + log.removeFile(); + return null; + } + + LOG.debug("opening state-log: " + logFile); + try { + log.open(); + } catch (ProcedureWALFormat.InvalidWALDataException e) { + LOG.warn("Remove uninitialized log " + logFile, e); + log.removeFile(); + return null; + } catch (IOException e) { + String msg = "Unable to read state log: " + logFile; + LOG.error(msg, e); + throw new IOException(msg, e); + } + + if (log.isCompacted()) { + try { + log.readTrailer(); + } catch (IOException e) { + // unfinished compacted log, throw it away + LOG.warn("Unfinished compacted log " + logFile, e); + log.removeFile(); + return null; + } + } + return log; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java new file mode 100644 index 0000000..8904116 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.util; + +import java.io.IOException; +import java.io.OutputStream; +import java.util.Arrays; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * Similar to the ByteArrayOutputStream, with the exception that we can prepend a header. + * e.g. you write some data and you want to prepend a header that contains the data len or cksum.
+ * + * ByteSlot slot = new ByteSlot(); + * // write data + * slot.write(...); + * slot.write(...); + * // write header with the size of the written data + * slot.markHead(); + * slot.write(Bytes.toBytes(slot.size())); + * // flush to stream as [header, data] + * slot.writeTo(stream); + * + */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class ByteSlot extends OutputStream { + private static final int DOUBLE_GROW_LIMIT = 1 << 20; + private static final int GROW_ALIGN = 128; + + private byte[] buf; + private int head; + private int size; + + public void reset() { + head = 0; + size = 0; + } + + public void markHead() { + head = size; + } + + public int getHead() { + return head; + } + + public int size() { + return size; + } + + public byte[] getBuffer() { + return buf; + } + + public void writeAt(int offset, int b) { + head = Math.min(head, offset); + buf[offset] = (byte)b; + } + + public void write(int b) { + ensureCapacity(size + 1); + buf[size++] = (byte)b; + } + + public void write(byte[] b, int off, int len) { + ensureCapacity(size + len); + System.arraycopy(b, off, buf, size, len); + size += len; + } + + public void writeTo(final OutputStream stream) throws IOException { + if (head != 0) { + stream.write(buf, head, size - head); + stream.write(buf, 0, head); + } else { + stream.write(buf, 0, size); + } + } + + private void ensureCapacity(int minCapacity) { + minCapacity = (minCapacity + (GROW_ALIGN - 1)) & -GROW_ALIGN; + if (buf == null) { + buf = new byte[minCapacity]; + } else if (minCapacity > buf.length) { + int newCapacity = buf.length << 1; + if (minCapacity > newCapacity || newCapacity > DOUBLE_GROW_LIMIT) { + newCapacity = minCapacity; + } + buf = Arrays.copyOf(buf, newCapacity); + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java new file mode 100644 index 0000000..97134c2 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.util; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public final class StringUtils { + private StringUtils() {} + + public static String humanTimeDiff(long timeDiff) { + StringBuilder buf = new StringBuilder(); + long hours = timeDiff / (60*60*1000); + long rem = (timeDiff % (60*60*1000)); + long minutes = rem / (60*1000); + rem = rem % (60*1000); + float seconds = rem / 1000.0f; + + if (hours != 0){ + buf.append(hours); + buf.append("hrs, "); + } + if (minutes != 0){ + buf.append(minutes); + buf.append("mins, "); + } + if (hours > 0 || minutes > 0) { + buf.append(seconds); + buf.append("sec"); + } else { + buf.append(String.format("%.4fsec", seconds)); + } + return buf.toString(); + } + + public static String humanSize(double size) { + if (size >= (1L << 40)) return String.format("%.1fT", size / (1L << 40)); + if (size >= (1L << 30)) return String.format("%.1fG", size / (1L << 30)); + if (size >= (1L << 20)) return String.format("%.1fM", size / (1L << 20)); + if (size >= (1L << 10)) return String.format("%.1fK", size / (1L << 10)); + return String.format("%.0f", size); + } + + public static boolean isEmpty(final String input) { + return input == null || input.length() == 0; + } + + public static String buildString(final String... parts) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < parts.length; ++i) { + sb.append(parts[i]); + } + return sb.toString(); + } + + public static StringBuilder appendStrings(final StringBuilder sb, final String... parts) { + for (int i = 0; i < parts.length; ++i) { + sb.append(parts[i]); + } + return sb; + } +} \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java new file mode 100644 index 0000000..f710ef4 --- /dev/null +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/TimeoutBlockingQueue.java @@ -0,0 +1,217 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + package org.apache.hadoop.hbase.procedure2.util; + + import java.util.concurrent.locks.Condition; + import java.util.concurrent.locks.ReentrantLock; + import java.util.concurrent.TimeUnit; + + import org.apache.hadoop.hbase.classification.InterfaceAudience; + import org.apache.hadoop.hbase.classification.InterfaceStability; + + @InterfaceAudience.Private + @InterfaceStability.Evolving + public class TimeoutBlockingQueue<E> { + public static interface TimeoutRetriever<T> { + long getTimeout(T object); + TimeUnit getTimeUnit(T object); + } + + private final ReentrantLock lock = new ReentrantLock(); + private final Condition waitCond = lock.newCondition(); + private final TimeoutRetriever<? super E> timeoutRetriever; + + private E[] objects; + private int head = 0; + private int tail = 0; + + public TimeoutBlockingQueue(TimeoutRetriever<? super E> timeoutRetriever) { + this(32, timeoutRetriever); + } + + @SuppressWarnings("unchecked") + public TimeoutBlockingQueue(int capacity, TimeoutRetriever<? super E> timeoutRetriever) { + this.objects = (E[])new Object[capacity]; + this.timeoutRetriever = timeoutRetriever; + } + + public void dump() { + for (int i = 0; i < objects.length; ++i) { + if (i == head) { + System.out.print("[" + objects[i] + "] "); + } else if (i == tail) { + System.out.print("]" + objects[i] + "[ "); + } else { + System.out.print(objects[i] + " "); + } + } + System.out.println(); + } + + public void clear() { + lock.lock(); + try { + if (head != tail) { + for (int i = head; i < tail; ++i) { + objects[i] = null; + } + head = 0; + tail = 0; + waitCond.signal(); + } + } finally { + lock.unlock(); + } + } + + public void add(E e) { + if (e == null) throw new NullPointerException(); + + lock.lock(); + try { + addElement(e); + waitCond.signal(); + } finally { + lock.unlock(); + } + } + + @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP") + public E poll() { + lock.lock(); + try { + if (isEmpty()) { + waitCond.await(); + return null; + } + + E elem = objects[head]; + long nanos = getNanosTimeout(elem); + nanos = waitCond.awaitNanos(nanos); + return nanos > 0 ? null : removeFirst(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return null; + } finally { + lock.unlock(); + } + } + + public int size() { + return tail - head; + } + + public boolean isEmpty() { + return (tail - head) == 0; + } + + public void signalAll() { + lock.lock(); + try { + waitCond.signalAll(); + } finally { + lock.unlock(); + } + } + + private void addElement(E elem) { + int size = (tail - head); + if ((objects.length - size) == 0) { + int capacity = size + ((size < 64) ?
(size + 2) : (size >> 1)); + E[] newObjects = (E[])new Object[capacity]; + + if (compareTimeouts(objects[tail - 1], elem) <= 0) { + // Append + System.arraycopy(objects, head, newObjects, 0, tail); + tail -= head; + newObjects[tail++] = elem; + } else if (compareTimeouts(objects[head], elem) > 0) { + // Prepend + System.arraycopy(objects, head, newObjects, 1, tail); + newObjects[0] = elem; + tail -= (head - 1); + } else { + // Insert in the middle + int index = upperBound(head, tail - 1, elem); + int newIndex = (index - head); + System.arraycopy(objects, head, newObjects, 0, newIndex); + newObjects[newIndex] = elem; + System.arraycopy(objects, index, newObjects, newIndex + 1, tail - index); + tail -= (head - 1); + } + head = 0; + objects = newObjects; + } else { + if (tail == objects.length) { + // shift down |-----AAAAAAA| + tail -= head; + System.arraycopy(objects, head, objects, 0, tail); + head = 0; + } + + if (tail == head || compareTimeouts(objects[tail - 1], elem) <= 0) { + // Append + objects[tail++] = elem; + } else if (head > 0 && compareTimeouts(objects[head], elem) > 0) { + // Prepend + objects[--head] = elem; + } else { + // Insert in the middle + int index = upperBound(head, tail - 1, elem); + System.arraycopy(objects, index, objects, index + 1, tail - index); + objects[index] = elem; + tail++; + } + } + } + + private E removeFirst() { + E elem = objects[head]; + objects[head] = null; + head = (head + 1) % objects.length; + if (head == 0) tail = 0; + return elem; + } + + private int upperBound(int start, int end, E key) { + while (start < end) { + int mid = (start + end) >>> 1; + E mitem = objects[mid]; + int cmp = compareTimeouts(mitem, key); + if (cmp > 0) { + end = mid; + } else { + start = mid + 1; + } + } + return start; + } + + private int compareTimeouts(final E a, final E b) { + long t1 = getNanosTimeout(a); + long t2 = getNanosTimeout(b); + return (t1 < t2) ? -1 : (t1 > t2) ? 1 : 0; + } + + private long getNanosTimeout(final E obj) { + TimeUnit unit = timeoutRetriever.getTimeUnit(obj); + long timeout = timeoutRetriever.getTimeout(obj); + return unit.toNanos(timeout); + } +} diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java new file mode 100644 index 0000000..6e7306c --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class ProcedureTestingUtility { + private static final Log LOG = LogFactory.getLog(ProcedureTestingUtility.class); + + private ProcedureTestingUtility() { + } + + public static ProcedureStore createStore(final Configuration conf, final FileSystem fs, + final Path baseDir) throws IOException { + return createWalStore(conf, fs, baseDir); + } + + public static WALProcedureStore createWalStore(final Configuration conf, final FileSystem fs, + final Path logDir) throws IOException { + return new WALProcedureStore(conf, fs, logDir, new WALProcedureStore.LeaseRecovery() { + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + // no-op + } + }); + } + + public static void restart(ProcedureExecutor procExecutor) + throws Exception { + restart(procExecutor, null); + } + + public static void restart(ProcedureExecutor procExecutor, + Runnable beforeStartAction) throws Exception { + ProcedureStore procStore = procExecutor.getStore(); + int storeThreads = procExecutor.getNumThreads(); + int execThreads = procExecutor.getNumThreads(); + // stop + procExecutor.stop(); + procStore.stop(false); + procExecutor.join(); + // nothing running... 
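+    // note: the beforeStartAction hook lets callers adjust test state (e.g. re-enable
+    // lock acquisition or flip kill flags) while both the store and the executor are down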
+ if (beforeStartAction != null) { + beforeStartAction.run(); + } + // re-start + procStore.start(storeThreads); + procExecutor.start(execThreads); + } + + public static void setKillBeforeStoreUpdate(ProcedureExecutor procExecutor, + boolean value) { + if (procExecutor.testing == null) { + procExecutor.testing = new ProcedureExecutor.Testing(); + } + procExecutor.testing.killBeforeStoreUpdate = value; + LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate); + } + + public static void setToggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor, + boolean value) { + if (procExecutor.testing == null) { + procExecutor.testing = new ProcedureExecutor.Testing(); + } + procExecutor.testing.toggleKillBeforeStoreUpdate = value; + } + + public static void toggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor) { + if (procExecutor.testing == null) { + procExecutor.testing = new ProcedureExecutor.Testing(); + } + procExecutor.testing.killBeforeStoreUpdate = !procExecutor.testing.killBeforeStoreUpdate; + LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate); + } + + public static void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor procExecutor, + boolean value) { + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, value); + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, value); + } + + public static long submitAndWait(ProcedureExecutor procExecutor, Procedure proc) { + long procId = procExecutor.submitProcedure(proc); + waitProcedure(procExecutor, procId); + return procId; + } + + public static void waitProcedure(ProcedureExecutor procExecutor, long procId) { + while (!procExecutor.isFinished(procId) && procExecutor.isRunning()) { + Threads.sleepWithoutInterrupt(250); + } + } + + public static void waitNoProcedureRunning(ProcedureExecutor procExecutor) { + int stableRuns = 0; + while (stableRuns < 10) { + if (procExecutor.getActiveExecutorCount() > 0 || procExecutor.getRunnableSet().size() > 0) { + stableRuns = 0; + Threads.sleepWithoutInterrupt(100); + } else { + stableRuns++; + Threads.sleepWithoutInterrupt(25); + } + } + } + + public static void assertProcNotYetCompleted(ProcedureExecutor procExecutor, + long procId) { + assertFalse("expected a running proc", procExecutor.isFinished(procId)); + assertEquals(null, procExecutor.getResult(procId)); + } + + public static void assertProcNotFailed(ProcedureExecutor procExecutor, + long procId) { + ProcedureResult result = procExecutor.getResult(procId); + assertTrue("expected procedure result", result != null); + assertProcNotFailed(result); + } + + public static void assertProcNotFailed(final ProcedureResult result) { + Exception exception = result.getException(); + String msg = exception != null ? 
exception.toString() : "no exception found"; + assertFalse(msg, result.isFailed()); + } + + public static void assertIsAbortException(final ProcedureResult result) { + LOG.info(result.getException()); + assertEquals(true, result.isFailed()); + Throwable cause = result.getException().getCause(); + assertTrue("expected abort exception, got "+ cause, + cause instanceof ProcedureAbortedException); + } +} diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java new file mode 100644 index 0000000..7fe109e --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -0,0 +1,338 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeoutException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + +import org.junit.After; +import org.junit.Before; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureExecution { + private static final Log LOG = LogFactory.getLog(TestProcedureExecution.class); + + private static final int PROCEDURE_EXECUTOR_SLOTS = 1; + private static final Procedure NULL_PROC = null; + + private ProcedureExecutor procExecutor; + private ProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procExecutor = new 
ProcedureExecutor(htu.getConfiguration(), null, procStore); + procStore.start(PROCEDURE_EXECUTOR_SLOTS); + procExecutor.start(PROCEDURE_EXECUTOR_SLOTS); + } + + @After + public void tearDown() throws IOException { + procExecutor.stop(); + procStore.stop(false); + fs.delete(logDir, true); + } + + private static class TestProcedureException extends Exception { + public TestProcedureException(String msg) { super(msg); } + } + + public static class TestSequentialProcedure extends SequentialProcedure { + private final Procedure[] subProcs; + private final List state; + private final Exception failure; + private final String name; + + public TestSequentialProcedure() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestSequentialProcedure(String name, List state, Procedure... subProcs) { + this.state = state; + this.subProcs = subProcs; + this.name = name; + this.failure = null; + } + + public TestSequentialProcedure(String name, List state, Exception failure) { + this.state = state; + this.subProcs = null; + this.name = name; + this.failure = failure; + } + + @Override + protected Procedure[] execute(Void env) { + state.add(name + "-execute"); + if (failure != null) { + setFailure(new RemoteProcedureException(name + "-failure", failure)); + return null; + } + return subProcs; + } + + @Override + protected void rollback(Void env) { + state.add(name + "-rollback"); + } + + @Override + protected boolean abort(Void env) { + state.add(name + "-abort"); + return true; + } + } + + @Test(timeout=30000) + public void testBadSubprocList() { + List state = new ArrayList(); + Procedure subProc2 = new TestSequentialProcedure("subProc2", state); + Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2, NULL_PROC); + Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); + + // subProc1 has a "null" subprocedure which is catched as InvalidArgument + // failed state with 2 execute and 2 rollback + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof IllegalArgumentException); + + assertEquals(state.toString(), 4, state.size()); + assertEquals("rootProc-execute", state.get(0)); + assertEquals("subProc1-execute", state.get(1)); + assertEquals("subProc1-rollback", state.get(2)); + assertEquals("rootProc-rollback", state.get(3)); + } + + @Test(timeout=30000) + public void testSingleSequentialProc() { + List state = new ArrayList(); + Procedure subProc2 = new TestSequentialProcedure("subProc2", state); + Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2); + Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); + + // successful state, with 3 execute + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(state.toString(), 3, state.size()); + } + + @Test(timeout=30000) + public void testSingleSequentialProcRollback() { + List state = new ArrayList(); + Procedure subProc2 = new TestSequentialProcedure("subProc2", state, + new TestProcedureException("fail test")); + Procedure subProc1 = new TestSequentialProcedure("subProc1", 
state, subProc2); + Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); + + // the 3rd proc fail, rollback after 2 successful execution + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TestProcedureException); + + assertEquals(state.toString(), 6, state.size()); + assertEquals("rootProc-execute", state.get(0)); + assertEquals("subProc1-execute", state.get(1)); + assertEquals("subProc2-execute", state.get(2)); + assertEquals("subProc2-rollback", state.get(3)); + assertEquals("subProc1-rollback", state.get(4)); + assertEquals("rootProc-rollback", state.get(5)); + } + + public static class TestFaultyRollback extends SequentialProcedure { + private int retries = 0; + + public TestFaultyRollback() { } + + @Override + protected Procedure[] execute(Void env) { + setFailure("faulty-rollback-test", new TestProcedureException("test faulty rollback")); + return null; + } + + @Override + protected void rollback(Void env) throws IOException { + if (++retries < 3) { + LOG.info("inject rollback failure " + retries); + throw new IOException("injected failure number " + retries); + } + LOG.info("execute non faulty rollback step retries=" + retries); + } + + @Override + protected boolean abort(Void env) { return false; } + } + + @Test(timeout=30000) + public void testRollbackRetriableFailure() { + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, new TestFaultyRollback()); + + ProcedureResult result = procExecutor.getResult(procId); + LOG.info(result.getException()); + assertTrue("expected a failure", result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TestProcedureException); + } + + public static class TestWaitingProcedure extends SequentialProcedure { + private final List state; + private final boolean hasChild; + private final String name; + + public TestWaitingProcedure() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestWaitingProcedure(String name, List state, boolean hasChild) { + this.hasChild = hasChild; + this.state = state; + this.name = name; + } + + @Override + protected Procedure[] execute(Void env) { + state.add(name + "-execute"); + setState(ProcedureState.WAITING_TIMEOUT); + return hasChild ? 
new Procedure[] { new TestWaitChild(name, state) } : null; + } + + @Override + protected void rollback(Void env) { + state.add(name + "-rollback"); + } + + @Override + protected boolean abort(Void env) { + state.add(name + "-abort"); + return true; + } + + public static class TestWaitChild extends SequentialProcedure { + private final List state; + private final String name; + + public TestWaitChild() { + throw new UnsupportedOperationException("recovery should not be triggered here"); + } + + public TestWaitChild(String name, List state) { + this.name = name; + this.state = state; + } + + @Override + protected Procedure[] execute(Void env) { + state.add(name + "-child-execute"); + return null; + } + + @Override + protected void rollback(Void env) { + state.add(name + "-child-rollback"); + } + + @Override + protected boolean abort(Void env) { + state.add(name + "-child-abort"); + return true; + } + } + } + + @Test(timeout=30000) + public void testAbortTimeout() { + final int PROC_TIMEOUT_MSEC = 2500; + List state = new ArrayList(); + Procedure proc = new TestWaitingProcedure("wproc", state, false); + proc.setTimeout(PROC_TIMEOUT_MSEC); + long startTime = EnvironmentEdgeManager.currentTime(); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + long execTime = EnvironmentEdgeManager.currentTime() - startTime; + LOG.info(state); + assertTrue("we didn't wait enough execTime=" + execTime, execTime >= PROC_TIMEOUT_MSEC); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TimeoutException); + assertEquals(state.toString(), 2, state.size()); + assertEquals("wproc-execute", state.get(0)); + assertEquals("wproc-rollback", state.get(1)); + } + + @Test(timeout=30000) + public void testAbortTimeoutWithChildren() { + List state = new ArrayList(); + Procedure proc = new TestWaitingProcedure("wproc", state, true); + proc.setTimeout(2500); + long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + LOG.info(state); + ProcedureResult result = procExecutor.getResult(rootId); + LOG.info(result.getException()); + assertTrue(state.toString(), result.isFailed()); + assertTrue(result.getException().toString(), + result.getException().getCause() instanceof TimeoutException); + assertEquals(state.toString(), 4, state.size()); + assertEquals("wproc-execute", state.get(0)); + assertEquals("wproc-child-execute", state.get(1)); + assertEquals("wproc-child-rollback", state.get(2)); + assertEquals("wproc-rollback", state.get(3)); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java new file mode 100644 index 0000000..e36a295 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureFairRunQueues.java @@ -0,0 +1,155 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureFairRunQueues { + private static class TestRunQueue implements ProcedureFairRunQueues.FairObject { + private final int priority; + private final String name; + + private boolean available = true; + + public TestRunQueue(String name, int priority) { + this.name = name; + this.priority = priority; + } + + @Override + public String toString() { + return name; + } + + private void setAvailable(boolean available) { + this.available = available; + } + + @Override + public boolean isAvailable() { + return available; + } + + @Override + public int getPriority() { + return priority; + } + } + + @Test + public void testEmptyFairQueues() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + for (int i = 0; i < 3; ++i) { + assertEquals(null, fairq.poll()); + } + } + + @Test + public void testFairQueues() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1)); + TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1)); + TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2)); + + for (int i = 0; i < 3; ++i) { + assertEquals(a, fairq.poll()); + assertEquals(b, fairq.poll()); + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.poll()); + } + } + + @Test + public void testFairQueuesNotAvailable() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1)); + TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1)); + TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2)); + + // m is not available + m.setAvailable(false); + for (int i = 0; i < 3; ++i) { + assertEquals(a, fairq.poll()); + assertEquals(b, fairq.poll()); + } + + // m is available + m.setAvailable(true); + for (int i = 0; i < 3; ++i) { + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.poll()); + assertEquals(a, fairq.poll()); + assertEquals(b, fairq.poll()); + } + + // b is not available + b.setAvailable(false); + for (int i = 0; i < 3; ++i) { + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.poll()); + assertEquals(a, fairq.poll()); + } + + assertEquals(m, fairq.poll()); + m.setAvailable(false); + // m should be fetched next, but is no longer available + assertEquals(a, fairq.poll()); + assertEquals(a, fairq.poll()); + b.setAvailable(true); + for (int i = 0; i < 3; ++i) { + assertEquals(b, fairq.poll()); + assertEquals(a, fairq.poll()); + } + } + + @Test + public void testFairQueuesDelete() throws Exception { + ProcedureFairRunQueues fairq + = new ProcedureFairRunQueues(1); + TestRunQueue a = fairq.add("A", new TestRunQueue("A", 1)); + TestRunQueue b = fairq.add("B", new TestRunQueue("B", 1)); 
+ TestRunQueue m = fairq.add("M", new TestRunQueue("M", 2)); + + // Fetch A and then remove it + assertEquals(a, fairq.poll()); + assertEquals(a, fairq.remove("A")); + + // Fetch B and then remove it + assertEquals(b, fairq.poll()); + assertEquals(b, fairq.remove("B")); + + // Fetch M and then remove it + assertEquals(m, fairq.poll()); + assertEquals(m, fairq.remove("M")); + + // nothing left + assertEquals(null, fairq.poll()); + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java new file mode 100644 index 0000000..0b7395b --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -0,0 +1,488 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.atomic.AtomicBoolean; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +import org.apache.hadoop.hbase.util.Threads; + +import org.junit.After; +import org.junit.Before; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureRecovery { + private static final Log LOG = LogFactory.getLog(TestProcedureRecovery.class); + + private static final int PROCEDURE_EXECUTOR_SLOTS = 1; + private static final Procedure NULL_PROC = null; + + private static ProcedureExecutor procExecutor; + private static ProcedureStore procStore; + private static int procSleepInterval; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new 
Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createStore(htu.getConfiguration(), fs, logDir); + procExecutor = new ProcedureExecutor(htu.getConfiguration(), null, procStore); + procExecutor.testing = new ProcedureExecutor.Testing(); + procStore.start(PROCEDURE_EXECUTOR_SLOTS); + procExecutor.start(PROCEDURE_EXECUTOR_SLOTS); + procSleepInterval = 0; + } + + @After + public void tearDown() throws IOException { + procExecutor.stop(); + procStore.stop(false); + fs.delete(logDir, true); + } + + private void restart() throws Exception { + dumpLogDirState(); + ProcedureTestingUtility.restart(procExecutor); + dumpLogDirState(); + } + + public static class TestSingleStepProcedure extends SequentialProcedure { + private int step = 0; + + public TestSingleStepProcedure() { } + + @Override + protected Procedure[] execute(Void env) { + LOG.debug("execute procedure " + this + " step=" + step); + step++; + setResult(Bytes.toBytes(step)); + return null; + } + + @Override + protected void rollback(Void env) { } + + @Override + protected boolean abort(Void env) { return true; } + } + + public static class BaseTestStepProcedure extends SequentialProcedure { + private AtomicBoolean abort = new AtomicBoolean(false); + private int step = 0; + + @Override + protected Procedure[] execute(Void env) { + LOG.debug("execute procedure " + this + " step=" + step); + ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor); + step++; + Threads.sleepWithoutInterrupt(procSleepInterval); + if (isAborted()) { + setFailure(new RemoteProcedureException(getClass().getName(), + new ProcedureAbortedException( + "got an abort at " + getClass().getName() + " step=" + step))); + return null; + } + return null; + } + + @Override + protected void rollback(Void env) { + LOG.debug("rollback procedure " + this + " step=" + step); + ProcedureTestingUtility.toggleKillBeforeStoreUpdate(procExecutor); + step++; + } + + @Override + protected boolean abort(Void env) { + abort.set(true); + return true; + } + + private boolean isAborted() { + boolean aborted = abort.get(); + BaseTestStepProcedure proc = this; + while (proc.hasParent() && !aborted) { + proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId()); + aborted = proc.isAborted(); + } + return aborted; + } + } + + public static class TestMultiStepProcedure extends BaseTestStepProcedure { + public TestMultiStepProcedure() { } + + @Override + public Procedure[] execute(Void env) { + super.execute(env); + return isFailed() ? null : new Procedure[] { new Step1Procedure() }; + } + + public static class Step1Procedure extends BaseTestStepProcedure { + public Step1Procedure() { } + + @Override + protected Procedure[] execute(Void env) { + super.execute(env); + return isFailed() ? 
null : new Procedure[] { new Step2Procedure() }; + } + } + + public static class Step2Procedure extends BaseTestStepProcedure { + public Step2Procedure() { } + } + } + + @Test + public void testNoopLoad() throws Exception { + restart(); + } + + @Test(timeout=30000) + public void testSingleStepProcRecovery() throws Exception { + Procedure proc = new TestSingleStepProcedure(); + procExecutor.testing.killBeforeStoreUpdate = true; + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + procExecutor.testing.killBeforeStoreUpdate = false; + + // Restart and verify that the procedures restart + long restartTs = EnvironmentEdgeManager.currentTime(); + restart(); + waitProcedure(procId); + ProcedureResult result = procExecutor.getResult(procId); + assertTrue(result.getLastUpdate() > restartTs); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(1, Bytes.toInt(result.getResult())); + long resultTs = result.getLastUpdate(); + + // Verify that after another restart the result is still there + restart(); + result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(resultTs, result.getLastUpdate()); + assertEquals(1, Bytes.toInt(result.getResult())); + } + + @Test(timeout=30000) + public void testMultiStepProcRecovery() throws Exception { + // Step 0 - kill + Procedure proc = new TestMultiStepProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + + // Step 0 exec && Step 1 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec + restart(); + waitProcedure(procId); + assertTrue(procExecutor.isRunning()); + + // The procedure is completed + ProcedureResult result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertProcNotFailed(result); + } + + @Test(timeout=30000) + public void testMultiStepRollbackRecovery() throws Exception { + // Step 0 - kill + Procedure proc = new TestMultiStepProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + + // Step 0 exec && Step 1 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec - rollback - kill + procSleepInterval = 2500; + restart(); + assertTrue(procExecutor.abort(procId)); + waitProcedure(procId); + assertFalse(procExecutor.isRunning()); + + // rollback - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // rollback - complete + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Restart the executor and get the result + restart(); + waitProcedure(procId); + + // The procedure is completed + ProcedureResult result = 
procExecutor.getResult(procId); + ProcedureTestingUtility.assertIsAbortException(result); + } + + public static class TestStateMachineProcedure + extends StateMachineProcedure { + enum State { STATE_1, STATE_2, STATE_3, DONE } + + public TestStateMachineProcedure() {} + + private AtomicBoolean aborted = new AtomicBoolean(false); + private int iResult = 0; + + @Override + protected StateMachineProcedure.Flow executeFromState(Void env, State state) { + switch (state) { + case STATE_1: + LOG.info("execute step 1 " + this); + setNextState(State.STATE_2); + iResult += 3; + break; + case STATE_2: + LOG.info("execute step 2 " + this); + setNextState(State.STATE_3); + iResult += 5; + break; + case STATE_3: + LOG.info("execute step 3 " + this); + Threads.sleepWithoutInterrupt(procSleepInterval); + if (aborted.get()) { + LOG.info("aborted step 3 " + this); + setAbortFailure("test", "aborted"); + break; + } + setNextState(State.DONE); + iResult += 7; + setResult(Bytes.toBytes(iResult)); + return Flow.NO_MORE_STATE; + default: + throw new UnsupportedOperationException(); + } + return Flow.HAS_MORE_STATE; + } + + @Override + protected void rollbackState(Void env, final State state) { + switch (state) { + case STATE_1: + LOG.info("rollback step 1 " + this); + break; + case STATE_2: + LOG.info("rollback step 2 " + this); + break; + case STATE_3: + LOG.info("rollback step 3 " + this); + break; + default: + throw new UnsupportedOperationException(); + } + } + + @Override + protected State getState(final int stateId) { + return State.values()[stateId]; + } + + @Override + protected int getStateId(final State state) { + return state.ordinal(); + } + + @Override + protected State getInitialState() { + return State.STATE_1; + } + + @Override + protected boolean abort(Void env) { + aborted.set(true); + return true; + } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + super.serializeStateData(stream); + stream.write(Bytes.toBytes(iResult)); + } + + @Override + protected void deserializeStateData(final InputStream stream) throws IOException { + super.deserializeStateData(stream); + byte[] data = new byte[4]; + stream.read(data); + iResult = Bytes.toInt(data); + } + } + + @Test(timeout=30000) + public void testStateMachineRecovery() throws Exception { + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true); + ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true); + + // Step 1 - kill + Procedure proc = new TestStateMachineProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && Step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec && step 3 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 3 exec + restart(); + waitProcedure(procId); + assertTrue(procExecutor.isRunning()); + + // The procedure is completed + ProcedureResult result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertProcNotFailed(result); + assertEquals(15, Bytes.toInt(result.getResult())); + } + + @Test(timeout=30000) + public void testStateMachineRollbackRecovery() throws Exception { + ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, true); + 
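+    // with the toggle flag set, the executor alternates between simulating a crash before
+    // a store update and running the step normally, so each step/rollback below is
+    // followed by a restart (see the "Step N - kill" comments)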
ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, true); + + // Step 1 - kill + Procedure proc = new TestStateMachineProcedure(); + long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 1 exec && Step 2 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 2 exec && step 3 - kill + restart(); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Step 3 exec - rollback step 3 - kill + procSleepInterval = 2500; + restart(); + assertTrue(procExecutor.abort(procId)); + waitProcedure(procId); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + assertFalse(procExecutor.isRunning()); + + // Rollback step 3 - rollback step 2 - kill + restart(); + waitProcedure(procId); + assertFalse(procExecutor.isRunning()); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + + // Rollback step 2 - step 1 - kill + restart(); + waitProcedure(procId); + assertFalse(procExecutor.isRunning()); + ProcedureTestingUtility.assertProcNotYetCompleted(procExecutor, procId); + + // Rollback step 1 - complete + restart(); + waitProcedure(procId); + assertTrue(procExecutor.isRunning()); + + // The procedure is completed + ProcedureResult result = procExecutor.getResult(procId); + ProcedureTestingUtility.assertIsAbortException(result); + } + + private void waitProcedure(final long procId) { + ProcedureTestingUtility.waitProcedure(procExecutor, procId); + dumpLogDirState(); + } + + private void dumpLogDirState() { + try { + FileStatus[] files = fs.listStatus(logDir); + if (files != null && files.length > 0) { + for (FileStatus file: files) { + assertTrue(file.toString(), file.isFile()); + LOG.debug("log file " + file.getPath() + " size=" + file.getLen()); + } + } else { + LOG.debug("no files under: " + logDir); + } + } catch (IOException e) { + LOG.warn("Unable to dump " + logDir, e); + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java new file mode 100644 index 0000000..88645ed --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -0,0 +1,226 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2; + +import java.io.IOException; +import java.util.ArrayList; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; +import org.apache.hadoop.hbase.testclassification.LargeTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, LargeTests.class}) +public class TestProcedureReplayOrder { + private static final Log LOG = LogFactory.getLog(TestProcedureReplayOrder.class); + + private static final Procedure NULL_PROC = null; + + private ProcedureExecutor procExecutor; + private TestProcedureEnv procEnv; + private ProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + htu.getConfiguration().setInt("hbase.procedure.store.wal.sync.wait.msec", 10); + + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procEnv = new TestProcedureEnv(); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procExecutor = new ProcedureExecutor(htu.getConfiguration(), procEnv, procStore); + procStore.start(24); + procExecutor.start(1); + } + + @After + public void tearDown() throws IOException { + procExecutor.stop(); + procStore.stop(false); + fs.delete(logDir, true); + } + + @Test(timeout=90000) + public void testSingleStepReplyOrder() throws Exception { + // avoid the procedure to be runnable + procEnv.setAcquireLock(false); + + // submit the procedures + submitProcedures(16, 25, TestSingleStepProcedure.class); + + // restart the executor and allow the procedures to run + ProcedureTestingUtility.restart(procExecutor, new Runnable() { + @Override + public void run() { + procEnv.setAcquireLock(true); + } + }); + + // wait the execution of all the procedures and + // assert that the execution order was sorted by procId + ProcedureTestingUtility.waitNoProcedureRunning(procExecutor); + procEnv.assertSortedExecList(); + + // TODO: FIXME: This should be revisited + } + + @Ignore + @Test(timeout=90000) + public void testMultiStepReplyOrder() throws Exception { + // avoid the procedure to be runnable + procEnv.setAcquireLock(false); + + // submit the procedures + submitProcedures(16, 10, TestTwoStepProcedure.class); + + // restart the executor and allow the procedures to run + ProcedureTestingUtility.restart(procExecutor, new Runnable() { + @Override + public void run() { + procEnv.setAcquireLock(true); + } + }); + + fail("TODO: FIXME: NOT IMPLEMENT REPLAY ORDER"); + } + + private void submitProcedures(final int nthreads, final int nprocPerThread, + final Class procClazz) throws Exception { + Thread[] submitThreads = new Thread[nthreads]; + for (int i = 0; i < submitThreads.length; ++i) { + submitThreads[i] = new 
Thread() { + @Override + public void run() { + for (int i = 0; i < nprocPerThread; ++i) { + try { + procExecutor.submitProcedure((Procedure)procClazz.newInstance()); + } catch (InstantiationException|IllegalAccessException e) { + LOG.error("unable to instantiate the procedure", e); + fail("failure during the proc.newInstance(): " + e.getMessage()); + } + } + } + }; + } + + for (int i = 0; i < submitThreads.length; ++i) { + submitThreads[i].start(); + } + + for (int i = 0; i < submitThreads.length; ++i) { + submitThreads[i].join(); + } + } + + private static class TestProcedureEnv { + private ArrayList execList = new ArrayList(); + private boolean acquireLock = true; + + public void setAcquireLock(boolean acquireLock) { + this.acquireLock = acquireLock; + } + + public boolean canAcquireLock() { + return acquireLock; + } + + public void addToExecList(final Procedure proc) { + execList.add(proc.getProcId()); + } + + public ArrayList getExecList() { + return execList; + } + + public void assertSortedExecList() { + LOG.debug("EXEC LIST: " + execList); + for (int i = 1; i < execList.size(); ++i) { + assertTrue("exec list not sorted: " + execList.get(i-1) + " >= " + execList.get(i), + execList.get(i-1) < execList.get(i)); + } + } + } + + public static class TestSingleStepProcedure extends SequentialProcedure { + public TestSingleStepProcedure() { } + + @Override + protected Procedure[] execute(TestProcedureEnv env) { + LOG.debug("execute procedure " + this); + env.addToExecList(this); + return null; + } + + protected boolean acquireLock(final TestProcedureEnv env) { + return env.canAcquireLock(); + } + + @Override + protected void rollback(TestProcedureEnv env) { } + + @Override + protected boolean abort(TestProcedureEnv env) { return true; } + } + + public static class TestTwoStepProcedure extends SequentialProcedure { + public TestTwoStepProcedure() { } + + @Override + protected Procedure[] execute(TestProcedureEnv env) { + LOG.debug("execute procedure " + this); + env.addToExecList(this); + return new Procedure[] { new TestSingleStepProcedure() }; + } + + protected boolean acquireLock(final TestProcedureEnv env) { + return true; + } + + @Override + protected void rollback(TestProcedureEnv env) { } + + @Override + protected boolean abort(TestProcedureEnv env) { return true; } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java new file mode 100644 index 0000000..0669549 --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.store; + +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestProcedureStoreTracker { + private static final Log LOG = LogFactory.getLog(TestProcedureStoreTracker.class); + + static class TestProcedure extends Procedure { + public TestProcedure(long procId) { + setProcId(procId); + } + + @Override + protected Procedure[] execute(Void env) { return null; } + + @Override + protected void rollback(Void env) { /* no-op */ } + + @Override + protected boolean abort(Void env) { return false; } + + @Override + protected void serializeStateData(final OutputStream stream) { /* no-op */ } + + @Override + protected void deserializeStateData(final InputStream stream) { /* no-op */ } + } + + @Test + public void testSeqInsertAndDelete() { + ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + assertTrue(tracker.isEmpty()); + + final int MIN_PROC = 1; + final int MAX_PROC = 1 << 10; + + // sequential insert + for (int i = MIN_PROC; i < MAX_PROC; ++i) { + tracker.insert(i); + + // All the proc that we inserted should not be deleted + for (int j = MIN_PROC; j <= i; ++j) { + assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j)); + } + // All the proc that are not yet inserted should be result as deleted + for (int j = i + 1; j < MAX_PROC; ++j) { + assertTrue(tracker.isDeleted(j) != ProcedureStoreTracker.DeleteState.NO); + } + } + + // sequential delete + for (int i = MIN_PROC; i < MAX_PROC; ++i) { + tracker.delete(i); + + // All the proc that we deleted should be deleted + for (int j = MIN_PROC; j <= i; ++j) { + assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(j)); + } + // All the proc that are not yet deleted should be result as not deleted + for (int j = i + 1; j < MAX_PROC; ++j) { + assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(j)); + } + } + assertTrue(tracker.isEmpty()); + } + + @Test + public void testPartialTracker() { + ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + tracker.setPartialFlag(true); + + // nothing in the tracker, the state is unknown + assertTrue(tracker.isEmpty()); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(1)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(579)); + + // Mark 1 as deleted, now that is a known state + tracker.setDeleted(1, true); + tracker.dump(); + assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(1)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(2)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(579)); + + // Mark 579 as non-deleted, now that is a known state + tracker.setDeleted(579, false); + assertEquals(ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(1)); + 
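+    // only ids explicitly marked via setDeleted() have a known state in partial mode;
+    // untouched ids (2, 577, 580) are still reported as MAYBE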
assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(2)); + assertEquals(ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(579)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(577)); + assertEquals(ProcedureStoreTracker.DeleteState.MAYBE, tracker.isDeleted(580)); + } + + @Test + public void testBasicCRUD() { + ProcedureStoreTracker tracker = new ProcedureStoreTracker(); + assertTrue(tracker.isEmpty()); + + Procedure[] procs = new TestProcedure[] { + new TestProcedure(1), new TestProcedure(2), new TestProcedure(3), + new TestProcedure(4), new TestProcedure(5), new TestProcedure(6), + }; + + tracker.insert(procs[0], null); + tracker.insert(procs[1], new Procedure[] { procs[2], procs[3], procs[4] }); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + + tracker.resetUpdates(); + assertFalse(tracker.isUpdated()); + + for (int i = 0; i < 4; ++i) { + tracker.update(procs[i]); + assertFalse(tracker.isEmpty()); + assertFalse(tracker.isUpdated()); + } + + tracker.update(procs[4]); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + + tracker.update(procs[5]); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + + for (int i = 0; i < 5; ++i) { + tracker.delete(procs[i].getProcId()); + assertFalse(tracker.isEmpty()); + assertTrue(tracker.isUpdated()); + } + tracker.delete(procs[5].getProcId()); + assertTrue(tracker.isEmpty()); + } +} diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java new file mode 100644 index 0000000..344b28b --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -0,0 +1,267 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hbase.procedure2.store.wal; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Iterator; +import java.util.HashSet; +import java.util.Set; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseCommonTestingUtility; +import org.apache.hadoop.hbase.procedure2.Procedure; +import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility; +import org.apache.hadoop.hbase.procedure2.SequentialProcedure; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.IOUtils; + +import org.junit.After; +import org.junit.Before; +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestWALProcedureStore { + private static final Log LOG = LogFactory.getLog(TestWALProcedureStore.class); + + private static final int PROCEDURE_STORE_SLOTS = 1; + private static final Procedure NULL_PROC = null; + + private WALProcedureStore procStore; + + private HBaseCommonTestingUtility htu; + private FileSystem fs; + private Path testDir; + private Path logDir; + + @Before + public void setUp() throws IOException { + htu = new HBaseCommonTestingUtility(); + testDir = htu.getDataTestDir(); + fs = testDir.getFileSystem(htu.getConfiguration()); + assertTrue(testDir.depth() > 1); + + logDir = new Path(testDir, "proc-logs"); + procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir); + procStore.start(PROCEDURE_STORE_SLOTS); + procStore.recoverLease(); + } + + @After + public void tearDown() throws IOException { + procStore.stop(false); + fs.delete(logDir, true); + } + + private Iterator storeRestart() throws Exception { + procStore.stop(false); + procStore.start(PROCEDURE_STORE_SLOTS); + procStore.recoverLease(); + return procStore.load(); + } + + @Test + public void testEmptyLogLoad() throws Exception { + Iterator loader = storeRestart(); + assertEquals(0, countProcedures(loader)); + } + + @Test + public void testLoad() throws Exception { + Set procIds = new HashSet<>(); + + // Insert something in the log + Procedure proc1 = new TestSequentialProcedure(); + procIds.add(proc1.getProcId()); + procStore.insert(proc1, null); + + Procedure proc2 = new TestSequentialProcedure(); + Procedure[] child2 = new Procedure[2]; + child2[0] = new TestSequentialProcedure(); + child2[1] = new TestSequentialProcedure(); + + procIds.add(proc2.getProcId()); + procIds.add(child2[0].getProcId()); + procIds.add(child2[1].getProcId()); + procStore.insert(proc2, child2); + + // Verify that everything is there + verifyProcIdsOnRestart(procIds); + + // Update and delete something + procStore.update(proc1); + procStore.update(child2[1]); + procStore.delete(child2[1].getProcId()); + procIds.remove(child2[1].getProcId()); + + // Verify that everything is there + verifyProcIdsOnRestart(procIds); + + // Remove 4 byte from the trailers + procStore.stop(false); + FileStatus[] logs = 
fs.listStatus(logDir); + assertEquals(3, logs.length); + for (int i = 0; i < logs.length; ++i) { + corruptLog(logs[i], 4); + } + verifyProcIdsOnRestart(procIds); + } + + @Test + public void testCorruptedTrailer() throws Exception { + // Insert something + for (int i = 0; i < 100; ++i) { + procStore.insert(new TestSequentialProcedure(), null); + } + + // Stop the store + procStore.stop(false); + + // Remove 4 byte from the trailer + FileStatus[] logs = fs.listStatus(logDir); + assertEquals(1, logs.length); + corruptLog(logs[0], 4); + + int count = countProcedures(storeRestart()); + assertEquals(100, count); + } + + @Test + public void testCorruptedEntries() throws Exception { + // Insert something + for (int i = 0; i < 100; ++i) { + procStore.insert(new TestSequentialProcedure(), null); + } + + // Stop the store + procStore.stop(false); + + // Remove some byte from the log + // (enough to cut the trailer and corrupt some entries) + FileStatus[] logs = fs.listStatus(logDir); + assertEquals(1, logs.length); + corruptLog(logs[0], 1823); + + int count = countProcedures(storeRestart()); + assertTrue(procStore.getCorruptedLogs() != null); + assertEquals(1, procStore.getCorruptedLogs().size()); + assertEquals(85, count); + } + + private void corruptLog(final FileStatus logFile, final long dropBytes) + throws IOException { + assertTrue(logFile.getLen() > dropBytes); + LOG.debug("corrupt log " + logFile.getPath() + + " size=" + logFile.getLen() + " drop=" + dropBytes); + Path tmpPath = new Path(testDir, "corrupted.log"); + InputStream in = fs.open(logFile.getPath()); + OutputStream out = fs.create(tmpPath); + IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true); + fs.rename(tmpPath, logFile.getPath()); + } + + private void verifyProcIdsOnRestart(final Set procIds) throws Exception { + int count = 0; + Iterator loader = storeRestart(); + while (loader.hasNext()) { + Procedure proc = loader.next(); + LOG.debug("loading procId=" + proc.getProcId()); + assertTrue("procId=" + proc.getProcId() + " unexpected", procIds.contains(proc.getProcId())); + count++; + } + assertEquals(procIds.size(), count); + } + + private void assertIsEmpty(Iterator iterator) { + assertEquals(0, countProcedures(iterator)); + } + + private int countProcedures(Iterator iterator) { + int count = 0; + while (iterator.hasNext()) { + Procedure proc = iterator.next(); + LOG.trace("loading procId=" + proc.getProcId()); + count++; + } + return count; + } + + private void assertEmptyLogDir() { + try { + FileStatus[] status = fs.listStatus(logDir); + assertTrue("expected empty state-log dir", status == null || status.length == 0); + } catch (FileNotFoundException e) { + fail("expected the state-log dir to be present: " + logDir); + } catch (IOException e) { + fail("got en exception on state-log dir list: " + e.getMessage()); + } + } + + public static class TestSequentialProcedure extends SequentialProcedure { + private static long seqid = 0; + + public TestSequentialProcedure() { + setProcId(++seqid); + } + + @Override + protected Procedure[] execute(Void env) { return null; } + + @Override + protected void rollback(Void env) { } + + @Override + protected boolean abort(Void env) { return false; } + + @Override + protected void serializeStateData(final OutputStream stream) throws IOException { + long procId = getProcId(); + if (procId % 2 == 0) { + stream.write(Bytes.toBytes(procId)); + } + } + + @Override + protected void deserializeStateData(InputStream stream) throws IOException { + long procId = getProcId(); + if (procId % 2 
== 0) { + byte[] bProcId = new byte[8]; + assertEquals(8, stream.read(bProcId)); + assertEquals(procId, Bytes.toLong(bProcId)); + } else { + assertEquals(0, stream.available()); + } + } + } +} \ No newline at end of file diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java new file mode 100644 index 0000000..aff536a --- /dev/null +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestTimeoutBlockingQueue.java @@ -0,0 +1,137 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hbase.procedure2.util; + + +import java.util.Arrays; +import java.util.concurrent.TimeUnit; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hbase.procedure2.util.TimeoutBlockingQueue.TimeoutRetriever; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.apache.hadoop.hbase.testclassification.MasterTests; + +import org.junit.Assert; +import org.junit.Test; +import org.junit.experimental.categories.Category; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +@Category({MasterTests.class, SmallTests.class}) +public class TestTimeoutBlockingQueue { + private static final Log LOG = LogFactory.getLog(TestTimeoutBlockingQueue.class); + + static class TestObject { + private long timeout; + private int seqId; + + public TestObject(int seqId, long timeout) { + this.timeout = timeout; + this.seqId = seqId; + } + + public long getTimeout() { + return timeout; + } + + public String toString() { + return String.format("(%03d, %03d)", seqId, timeout); + } + } + + static class TestObjectTimeoutRetriever implements TimeoutRetriever { + @Override + public long getTimeout(TestObject obj) { + return obj.getTimeout(); + } + + @Override + public TimeUnit getTimeUnit(TestObject obj) { + return TimeUnit.MILLISECONDS; + } + } + + @Test + public void testOrder() { + TimeoutBlockingQueue queue = + new TimeoutBlockingQueue(8, new TestObjectTimeoutRetriever()); + + long[] timeouts = new long[] {500, 200, 700, 300, 600, 600, 200, 800, 500}; + + for (int i = 0; i < timeouts.length; ++i) { + for (int j = 0; j <= i; ++j) { + queue.add(new TestObject(j, timeouts[j])); + queue.dump(); + } + + long prev = 0; + for (int j = 0; j <= i; ++j) { + TestObject obj = queue.poll(); + assertTrue(obj.getTimeout() >= prev); + prev = obj.getTimeout(); + queue.dump(); + } + } + } + + @Test + public void testTimeoutBlockingQueue() { + TimeoutBlockingQueue queue; + + int[][] testArray = new 
int[][] { + {200, 400, 600}, // append + {200, 400, 100}, // prepend + {200, 400, 300}, // insert + }; + + for (int i = 0; i < testArray.length; ++i) { + int[] sortedArray = Arrays.copyOf(testArray[i], testArray[i].length); + Arrays.sort(sortedArray); + + // test with head == 0 + queue = new TimeoutBlockingQueue(2, new TestObjectTimeoutRetriever()); + for (int j = 0; j < testArray[i].length; ++j) { + queue.add(new TestObject(j, testArray[i][j])); + queue.dump(); + } + + for (int j = 0; !queue.isEmpty(); ++j) { + assertEquals(sortedArray[j], queue.poll().getTimeout()); + } + + queue = new TimeoutBlockingQueue(2, new TestObjectTimeoutRetriever()); + queue.add(new TestObject(0, 50)); + assertEquals(50, queue.poll().getTimeout()); + + // test with head > 0 + for (int j = 0; j < testArray[i].length; ++j) { + queue.add(new TestObject(j, testArray[i][j])); + queue.dump(); + } + + for (int j = 0; !queue.isEmpty(); ++j) { + assertEquals(sortedArray[j], queue.poll().getTimeout()); + } + } + } +} diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index 7787c52..0d33332 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -176,6 +176,7 @@ MapReduce.proto Master.proto MultiRowMutation.proto + Procedure.proto Quota.proto RegionServerStatus.proto RowProcessor.proto diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java new file mode 100644 index 0000000..3c7dcdb --- /dev/null +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ProcedureProtos.java @@ -0,0 +1,7219 @@ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: Procedure.proto + +package org.apache.hadoop.hbase.protobuf.generated; + +public final class ProcedureProtos { + private ProcedureProtos() {} + public static void registerAllExtensions( + com.google.protobuf.ExtensionRegistry registry) { + } + /** + * Protobuf enum {@code ProcedureState} + */ + public enum ProcedureState + implements com.google.protobuf.ProtocolMessageEnum { + /** + * INITIALIZING = 1; + * + *

+     * Procedure in construction, not yet added to the executor
+     * 
+ */ + INITIALIZING(0, 1), + /** + * RUNNABLE = 2; + * + *
+     * Procedure added to the executor, and ready to be executed
+     * 
+ */ + RUNNABLE(1, 2), + /** + * WAITING = 3; + * + *
+     * The procedure is waiting on children to be completed
+     * 
+ */ + WAITING(2, 3), + /** + * WAITING_TIMEOUT = 4; + * + *
+     * The procedure is waiting for a timeout or an external event
+     * 
+ */ + WAITING_TIMEOUT(3, 4), + /** + * ROLLEDBACK = 5; + * + *
+     * The procedure failed and was rolled back
+     * 
+ */ + ROLLEDBACK(4, 5), + /** + * FINISHED = 6; + * + *
+     * The procedure execution is completed. May need a rollback if it failed.
+     * 
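+     * A rough lifecycle sketch, inferred only from the state descriptions in this
+     * enum (illustrative, not normative): INITIALIZING -> RUNNABLE, then zero or more
+     * WAITING / WAITING_TIMEOUT -> RUNNABLE cycles, ending in FINISHED on success,
+     * or in ROLLEDBACK once a failed execution has been rolled back.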
+ */ + FINISHED(5, 6), + ; + + /** + * INITIALIZING = 1; + * + *
+     * Procedure in construction, not yet added to the executor
+     * 
+ */ + public static final int INITIALIZING_VALUE = 1; + /** + * RUNNABLE = 2; + * + *
+     * Procedure added to the executor, and ready to be executed
+     * 
+ */ + public static final int RUNNABLE_VALUE = 2; + /** + * WAITING = 3; + * + *
+     * The procedure is waiting on children to be completed
+     * 
+ */ + public static final int WAITING_VALUE = 3; + /** + * WAITING_TIMEOUT = 4; + * + *
+     * The procedure is waiting for a timeout or an external event
+     * 
+ */ + public static final int WAITING_TIMEOUT_VALUE = 4; + /** + * ROLLEDBACK = 5; + * + *
+     * The procedure failed and was rolled back
+     * 
+ */ + public static final int ROLLEDBACK_VALUE = 5; + /** + * FINISHED = 6; + * + *
+     * The procedure execution is completed. May need a rollback if it failed.
+     * 
+ */ + public static final int FINISHED_VALUE = 6; + + + public final int getNumber() { return value; } + + public static ProcedureState valueOf(int value) { + switch (value) { + case 1: return INITIALIZING; + case 2: return RUNNABLE; + case 3: return WAITING; + case 4: return WAITING_TIMEOUT; + case 5: return ROLLEDBACK; + case 6: return FINISHED; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ProcedureState findValueByNumber(int number) { + return ProcedureState.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.getDescriptor().getEnumTypes().get(0); + } + + private static final ProcedureState[] VALUES = values(); + + public static ProcedureState valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private ProcedureState(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ProcedureState) + } + + public interface ProcedureOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required string class_name = 1; + /** + * required string class_name = 1; + * + *
+     * internal "static" state
+     * 
+ */ + boolean hasClassName(); + /** + * required string class_name = 1; + * + *
+     * internal "static" state
+     * 
+ */ + java.lang.String getClassName(); + /** + * required string class_name = 1; + * + *
+     * internal "static" state
+     * 
+ */ + com.google.protobuf.ByteString + getClassNameBytes(); + + // optional uint64 parent_id = 2; + /** + * optional uint64 parent_id = 2; + * + *
+     * parent if not a root-procedure otherwise not set
+     * 
+ */ + boolean hasParentId(); + /** + * optional uint64 parent_id = 2; + * + *
+     * parent if not a root-procedure otherwise not set
+     * 
+ */ + long getParentId(); + + // required uint64 proc_id = 3; + /** + * required uint64 proc_id = 3; + */ + boolean hasProcId(); + /** + * required uint64 proc_id = 3; + */ + long getProcId(); + + // required uint64 start_time = 4; + /** + * required uint64 start_time = 4; + */ + boolean hasStartTime(); + /** + * required uint64 start_time = 4; + */ + long getStartTime(); + + // optional string owner = 5; + /** + * optional string owner = 5; + */ + boolean hasOwner(); + /** + * optional string owner = 5; + */ + java.lang.String getOwner(); + /** + * optional string owner = 5; + */ + com.google.protobuf.ByteString + getOwnerBytes(); + + // required .ProcedureState state = 6; + /** + * required .ProcedureState state = 6; + * + *
+     * internal "runtime" state
+     * 
+ */ + boolean hasState(); + /** + * required .ProcedureState state = 6; + * + *
+     * internal "runtime" state
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState(); + + // repeated uint32 stack_id = 7; + /** + * repeated uint32 stack_id = 7; + * + *
+     * stack indices in case the procedure was running
+     * 
+ */ + java.util.List getStackIdList(); + /** + * repeated uint32 stack_id = 7; + * + *
+     * stack indices in case the procedure was running
+     * 
+ */ + int getStackIdCount(); + /** + * repeated uint32 stack_id = 7; + * + *
+     * stack indices in case the procedure was running
+     * 
+ */ + int getStackId(int index); + + // required uint64 last_update = 8; + /** + * required uint64 last_update = 8; + */ + boolean hasLastUpdate(); + /** + * required uint64 last_update = 8; + */ + long getLastUpdate(); + + // optional uint32 timeout = 9; + /** + * optional uint32 timeout = 9; + */ + boolean hasTimeout(); + /** + * optional uint32 timeout = 9; + */ + int getTimeout(); + + // optional .ForeignExceptionMessage exception = 10; + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+     * user state/results
+     * 
+ */ + boolean hasException(); + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+     * user state/results
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException(); + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+     * user state/results
+     * 
+ */ + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder(); + + // optional bytes result = 11; + /** + * optional bytes result = 11; + * + *
+     * opaque (user) result structure
+     * 
+ */ + boolean hasResult(); + /** + * optional bytes result = 11; + * + *
+     * opaque (user) result structure
+     * 
+ */ + com.google.protobuf.ByteString getResult(); + + // optional bytes state_data = 12; + /** + * optional bytes state_data = 12; + * + *
+     * opaque (user) procedure internal-state
+     * 
+ */ + boolean hasStateData(); + /** + * optional bytes state_data = 12; + * + *
+     * opaque (user) procedure internal-state
+     * 
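+     * In practice this appears to carry the bytes written by a procedure's own
+     * serializeStateData(...) and read back by deserializeStateData(...) on recovery
+     * (an inference from the test code in this patch, not a contract of this field).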
+ */ + com.google.protobuf.ByteString getStateData(); + } + /** + * Protobuf type {@code Procedure} + * + *
+   **
+   * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
+   * 
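+   * A minimal construction sketch (values are illustrative; per isInitialized() the
+   * required fields are class_name, proc_id, start_time, state and last_update):
+   *
+   *   long now = System.currentTimeMillis();
+   *   Procedure p = Procedure.newBuilder()
+   *       .setClassName("org.example.MyProcedure")   // hypothetical procedure class
+   *       .setProcId(1).setStartTime(now).setLastUpdate(now)
+   *       .setState(ProcedureState.RUNNABLE)
+   *       .build();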
+ */ + public static final class Procedure extends + com.google.protobuf.GeneratedMessage + implements ProcedureOrBuilder { + // Use Procedure.newBuilder() to construct. + private Procedure(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private Procedure(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Procedure defaultInstance; + public static Procedure getDefaultInstance() { + return defaultInstance; + } + + public Procedure getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Procedure( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + bitField0_ |= 0x00000001; + className_ = input.readBytes(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + parentId_ = input.readUInt64(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + procId_ = input.readUInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + startTime_ = input.readUInt64(); + break; + } + case 42: { + bitField0_ |= 0x00000010; + owner_ = input.readBytes(); + break; + } + case 48: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState value = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(6, rawValue); + } else { + bitField0_ |= 0x00000020; + state_ = value; + } + break; + } + case 56: { + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + stackId_.add(input.readUInt32()); + break; + } + case 58: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000040) == 0x00000040) && input.getBytesUntilLimit() > 0) { + stackId_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000040; + } + while (input.getBytesUntilLimit() > 0) { + stackId_.add(input.readUInt32()); + } + input.popLimit(limit); + break; + } + case 64: { + bitField0_ |= 0x00000040; + lastUpdate_ = input.readUInt64(); + break; + } + case 72: { + bitField0_ |= 0x00000080; + timeout_ = input.readUInt32(); + break; + } + case 82: { + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder subBuilder = null; + if (((bitField0_ & 0x00000100) == 0x00000100)) { + subBuilder = exception_.toBuilder(); + } + exception_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.PARSER, extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(exception_); + exception_ = subBuilder.buildPartial(); + } + bitField0_ |= 0x00000100; + break; + } + case 90: 
{ + bitField0_ |= 0x00000200; + result_ = input.readBytes(); + break; + } + case 98: { + bitField0_ |= 0x00000400; + stateData_ = input.readBytes(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = java.util.Collections.unmodifiableList(stackId_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Procedure parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Procedure(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required string class_name = 1; + public static final int CLASS_NAME_FIELD_NUMBER = 1; + private java.lang.Object className_; + /** + * required string class_name = 1; + * + *
+     * internal "static" state
+     * 
+ */ + public boolean hasClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string class_name = 1; + * + *
+     * internal "static" state
+     * 
+ */ + public java.lang.String getClassName() { + java.lang.Object ref = className_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + className_ = s; + } + return s; + } + } + /** + * required string class_name = 1; + * + *
+     * internal "static" state
+     * 
+ */ + public com.google.protobuf.ByteString + getClassNameBytes() { + java.lang.Object ref = className_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + className_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // optional uint64 parent_id = 2; + public static final int PARENT_ID_FIELD_NUMBER = 2; + private long parentId_; + /** + * optional uint64 parent_id = 2; + * + *
+     * parent if not a root-procedure otherwise not set
+     * 
+ */ + public boolean hasParentId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 parent_id = 2; + * + *
+     * parent if not a root-procedure otherwise not set
+     * 
+ */ + public long getParentId() { + return parentId_; + } + + // required uint64 proc_id = 3; + public static final int PROC_ID_FIELD_NUMBER = 3; + private long procId_; + /** + * required uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + + // required uint64 start_time = 4; + public static final int START_TIME_FIELD_NUMBER = 4; + private long startTime_; + /** + * required uint64 start_time = 4; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_time = 4; + */ + public long getStartTime() { + return startTime_; + } + + // optional string owner = 5; + public static final int OWNER_FIELD_NUMBER = 5; + private java.lang.Object owner_; + /** + * optional string owner = 5; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner = 5; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = + (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + if (bs.isValidUtf8()) { + owner_ = s; + } + return s; + } + } + /** + * optional string owner = 5; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + // required .ProcedureState state = 6; + public static final int STATE_FIELD_NUMBER = 6; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState state_; + /** + * required .ProcedureState state = 6; + * + *
+     * internal "runtime" state
+     * 
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required .ProcedureState state = 6; + * + *
+     * internal "runtime" state
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState() { + return state_; + } + + // repeated uint32 stack_id = 7; + public static final int STACK_ID_FIELD_NUMBER = 7; + private java.util.List stackId_; + /** + * repeated uint32 stack_id = 7; + * + *
+     * stack indices in case the procedure was running
+     * 
+ */ + public java.util.List + getStackIdList() { + return stackId_; + } + /** + * repeated uint32 stack_id = 7; + * + *
+     * stack indices in case the procedure was running
+     * 
+ */ + public int getStackIdCount() { + return stackId_.size(); + } + /** + * repeated uint32 stack_id = 7; + * + *
+     * stack indices in case the procedure was running
+     * 
+ */ + public int getStackId(int index) { + return stackId_.get(index); + } + + // required uint64 last_update = 8; + public static final int LAST_UPDATE_FIELD_NUMBER = 8; + private long lastUpdate_; + /** + * required uint64 last_update = 8; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000040) == 0x00000040); + } + /** + * required uint64 last_update = 8; + */ + public long getLastUpdate() { + return lastUpdate_; + } + + // optional uint32 timeout = 9; + public static final int TIMEOUT_FIELD_NUMBER = 9; + private int timeout_; + /** + * optional uint32 timeout = 9; + */ + public boolean hasTimeout() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * optional uint32 timeout = 9; + */ + public int getTimeout() { + return timeout_; + } + + // optional .ForeignExceptionMessage exception = 10; + public static final int EXCEPTION_FIELD_NUMBER = 10; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_; + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+     * user state/results
+     * 
+ */ + public boolean hasException() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+     * user state/results
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + return exception_; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+     * user state/results
+     * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + return exception_; + } + + // optional bytes result = 11; + public static final int RESULT_FIELD_NUMBER = 11; + private com.google.protobuf.ByteString result_; + /** + * optional bytes result = 11; + * + *
+     * opaque (user) result structure
+     * 
+ */ + public boolean hasResult() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional bytes result = 11; + * + *
+     * opaque (user) result structure
+     * 
+ */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + + // optional bytes state_data = 12; + public static final int STATE_DATA_FIELD_NUMBER = 12; + private com.google.protobuf.ByteString stateData_; + /** + * optional bytes state_data = 12; + * + *
+     * opaque (user) procedure internal-state
+     * 
+ */ + public boolean hasStateData() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional bytes state_data = 12; + * + *
+     * opaque (user) procedure internal-state
+     * 
+ */ + public com.google.protobuf.ByteString getStateData() { + return stateData_; + } + + private void initFields() { + className_ = ""; + parentId_ = 0L; + procId_ = 0L; + startTime_ = 0L; + owner_ = ""; + state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + stackId_ = java.util.Collections.emptyList(); + lastUpdate_ = 0L; + timeout_ = 0; + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + result_ = com.google.protobuf.ByteString.EMPTY; + stateData_ = com.google.protobuf.ByteString.EMPTY; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasClassName()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasProcId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasStartTime()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasState()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasLastUpdate()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, parentId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, procId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, startTime_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getOwnerBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeEnum(6, state_.getNumber()); + } + for (int i = 0; i < stackId_.size(); i++) { + output.writeUInt32(7, stackId_.get(i)); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeUInt64(8, lastUpdate_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeUInt32(9, timeout_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(10, exception_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + output.writeBytes(11, result_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + output.writeBytes(12, stateData_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getClassNameBytes()); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, parentId_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, procId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, startTime_); + } + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getOwnerBytes()); + } + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(6, state_.getNumber()); + } + { + int dataSize = 0; + for (int i = 
0; i < stackId_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(stackId_.get(i)); + } + size += dataSize; + size += 1 * getStackIdList().size(); + } + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(8, lastUpdate_); + } + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(9, timeout_); + } + if (((bitField0_ & 0x00000100) == 0x00000100)) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(10, exception_); + } + if (((bitField0_ & 0x00000200) == 0x00000200)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(11, result_); + } + if (((bitField0_ & 0x00000400) == 0x00000400)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(12, stateData_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) obj; + + boolean result = true; + result = result && (hasClassName() == other.hasClassName()); + if (hasClassName()) { + result = result && getClassName() + .equals(other.getClassName()); + } + result = result && (hasParentId() == other.hasParentId()); + if (hasParentId()) { + result = result && (getParentId() + == other.getParentId()); + } + result = result && (hasProcId() == other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && (hasStartTime() == other.hasStartTime()); + if (hasStartTime()) { + result = result && (getStartTime() + == other.getStartTime()); + } + result = result && (hasOwner() == other.hasOwner()); + if (hasOwner()) { + result = result && getOwner() + .equals(other.getOwner()); + } + result = result && (hasState() == other.hasState()); + if (hasState()) { + result = result && + (getState() == other.getState()); + } + result = result && getStackIdList() + .equals(other.getStackIdList()); + result = result && (hasLastUpdate() == other.hasLastUpdate()); + if (hasLastUpdate()) { + result = result && (getLastUpdate() + == other.getLastUpdate()); + } + result = result && (hasTimeout() == other.hasTimeout()); + if (hasTimeout()) { + result = result && (getTimeout() + == other.getTimeout()); + } + result = result && (hasException() == other.hasException()); + if (hasException()) { + result = result && getException() + .equals(other.getException()); + } + result = result && (hasResult() == other.hasResult()); + if (hasResult()) { + result = result && getResult() + .equals(other.getResult()); + } + result = result && (hasStateData() == other.hasStateData()); + if (hasStateData()) { + result = result && getStateData() + .equals(other.getStateData()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 
0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasClassName()) { + hash = (37 * hash) + CLASS_NAME_FIELD_NUMBER; + hash = (53 * hash) + getClassName().hashCode(); + } + if (hasParentId()) { + hash = (37 * hash) + PARENT_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getParentId()); + } + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + if (hasStartTime()) { + hash = (37 * hash) + START_TIME_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartTime()); + } + if (hasOwner()) { + hash = (37 * hash) + OWNER_FIELD_NUMBER; + hash = (53 * hash) + getOwner().hashCode(); + } + if (hasState()) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getState()); + } + if (getStackIdCount() > 0) { + hash = (37 * hash) + STACK_ID_FIELD_NUMBER; + hash = (53 * hash) + getStackIdList().hashCode(); + } + if (hasLastUpdate()) { + hash = (37 * hash) + LAST_UPDATE_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLastUpdate()); + } + if (hasTimeout()) { + hash = (37 * hash) + TIMEOUT_FIELD_NUMBER; + hash = (53 * hash) + getTimeout(); + } + if (hasException()) { + hash = (37 * hash) + EXCEPTION_FIELD_NUMBER; + hash = (53 * hash) + getException().hashCode(); + } + if (hasResult()) { + hash = (37 * hash) + RESULT_FIELD_NUMBER; + hash = (53 * hash) + getResult().hashCode(); + } + if (hasStateData()) { + hash = (37 * hash) + STATE_DATA_FIELD_NUMBER; + hash = (53 * hash) + getStateData().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseDelimitedFrom( + java.io.InputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code Procedure} + * + *
+     **
+     * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
+     * 
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getExceptionFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + className_ = ""; + bitField0_ = (bitField0_ & ~0x00000001); + parentId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + startTime_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + owner_ = ""; + bitField0_ = (bitField0_ & ~0x00000010); + state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + bitField0_ = (bitField0_ & ~0x00000020); + stackId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + lastUpdate_ = 0L; + bitField0_ = (bitField0_ & ~0x00000080); + timeout_ = 0; + bitField0_ = (bitField0_ & ~0x00000100); + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + result_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000400); + stateData_ = com.google.protobuf.ByteString.EMPTY; + bitField0_ = (bitField0_ & ~0x00000800); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_Procedure_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure(this); + int from_bitField0_ = 
bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.className_ = className_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.parentId_ = parentId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.procId_ = procId_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.startTime_ = startTime_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { + to_bitField0_ |= 0x00000010; + } + result.owner_ = owner_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { + to_bitField0_ |= 0x00000020; + } + result.state_ = state_; + if (((bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = java.util.Collections.unmodifiableList(stackId_); + bitField0_ = (bitField0_ & ~0x00000040); + } + result.stackId_ = stackId_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { + to_bitField0_ |= 0x00000040; + } + result.lastUpdate_ = lastUpdate_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { + to_bitField0_ |= 0x00000080; + } + result.timeout_ = timeout_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { + to_bitField0_ |= 0x00000100; + } + if (exceptionBuilder_ == null) { + result.exception_ = exception_; + } else { + result.exception_ = exceptionBuilder_.build(); + } + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { + to_bitField0_ |= 0x00000200; + } + result.result_ = result_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { + to_bitField0_ |= 0x00000400; + } + result.stateData_ = stateData_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) return this; + if (other.hasClassName()) { + bitField0_ |= 0x00000001; + className_ = other.className_; + onChanged(); + } + if (other.hasParentId()) { + setParentId(other.getParentId()); + } + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + if (other.hasStartTime()) { + setStartTime(other.getStartTime()); + } + if (other.hasOwner()) { + bitField0_ |= 0x00000010; + owner_ = other.owner_; + onChanged(); + } + if (other.hasState()) { + setState(other.getState()); + } + if (!other.stackId_.isEmpty()) { + if (stackId_.isEmpty()) { + stackId_ = other.stackId_; + bitField0_ = (bitField0_ & ~0x00000040); + } else { + ensureStackIdIsMutable(); + stackId_.addAll(other.stackId_); + } + onChanged(); + } + if (other.hasLastUpdate()) { + setLastUpdate(other.getLastUpdate()); + } + if (other.hasTimeout()) { + setTimeout(other.getTimeout()); + } + if (other.hasException()) { + mergeException(other.getException()); + } + if (other.hasResult()) { + setResult(other.getResult()); + } + if (other.hasStateData()) { + setStateData(other.getStateData()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasClassName()) { + + return false; + } + if (!hasProcId()) { + + return false; + } + if 
(!hasStartTime()) { + + return false; + } + if (!hasState()) { + + return false; + } + if (!hasLastUpdate()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required string class_name = 1; + private java.lang.Object className_ = ""; + /** + * required string class_name = 1; + * + *
+       * internal "static" state
+       * 
+ */ + public boolean hasClassName() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required string class_name = 1; + * + *
+       * internal "static" state
+       * 
+ */ + public java.lang.String getClassName() { + java.lang.Object ref = className_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + className_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * required string class_name = 1; + * + *
+       * internal "static" state
+       * 
+ */ + public com.google.protobuf.ByteString + getClassNameBytes() { + java.lang.Object ref = className_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + className_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * required string class_name = 1; + * + *
+       * internal "static" state
+       * 
+ */ + public Builder setClassName( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + className_ = value; + onChanged(); + return this; + } + /** + * required string class_name = 1; + * + *
+       * internal "static" state
+       * 
+ */ + public Builder clearClassName() { + bitField0_ = (bitField0_ & ~0x00000001); + className_ = getDefaultInstance().getClassName(); + onChanged(); + return this; + } + /** + * required string class_name = 1; + * + *
+       * internal "static" state
+       * 
+ */ + public Builder setClassNameBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + className_ = value; + onChanged(); + return this; + } + + // optional uint64 parent_id = 2; + private long parentId_ ; + /** + * optional uint64 parent_id = 2; + * + *
+       * parent if not a root-procedure otherwise not set
+       * 
+ */ + public boolean hasParentId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 parent_id = 2; + * + *
+       * parent if not a root-procedure otherwise not set
+       * 
+ */ + public long getParentId() { + return parentId_; + } + /** + * optional uint64 parent_id = 2; + * + *
+       * parent if not a root-procedure otherwise not set
+       * 
+ */ + public Builder setParentId(long value) { + bitField0_ |= 0x00000002; + parentId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 parent_id = 2; + * + *
+       * parent if not a root-procedure otherwise not set
+       * 
+ */ + public Builder clearParentId() { + bitField0_ = (bitField0_ & ~0x00000002); + parentId_ = 0L; + onChanged(); + return this; + } + + // required uint64 proc_id = 3; + private long procId_ ; + /** + * required uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + /** + * required uint64 proc_id = 3; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000004; + procId_ = value; + onChanged(); + return this; + } + /** + * required uint64 proc_id = 3; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000004); + procId_ = 0L; + onChanged(); + return this; + } + + // required uint64 start_time = 4; + private long startTime_ ; + /** + * required uint64 start_time = 4; + */ + public boolean hasStartTime() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 start_time = 4; + */ + public long getStartTime() { + return startTime_; + } + /** + * required uint64 start_time = 4; + */ + public Builder setStartTime(long value) { + bitField0_ |= 0x00000008; + startTime_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_time = 4; + */ + public Builder clearStartTime() { + bitField0_ = (bitField0_ & ~0x00000008); + startTime_ = 0L; + onChanged(); + return this; + } + + // optional string owner = 5; + private java.lang.Object owner_ = ""; + /** + * optional string owner = 5; + */ + public boolean hasOwner() { + return ((bitField0_ & 0x00000010) == 0x00000010); + } + /** + * optional string owner = 5; + */ + public java.lang.String getOwner() { + java.lang.Object ref = owner_; + if (!(ref instanceof java.lang.String)) { + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + owner_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + /** + * optional string owner = 5; + */ + public com.google.protobuf.ByteString + getOwnerBytes() { + java.lang.Object ref = owner_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8( + (java.lang.String) ref); + owner_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + /** + * optional string owner = 5; + */ + public Builder setOwner( + java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + owner_ = value; + onChanged(); + return this; + } + /** + * optional string owner = 5; + */ + public Builder clearOwner() { + bitField0_ = (bitField0_ & ~0x00000010); + owner_ = getDefaultInstance().getOwner(); + onChanged(); + return this; + } + /** + * optional string owner = 5; + */ + public Builder setOwnerBytes( + com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000010; + owner_ = value; + onChanged(); + return this; + } + + // required .ProcedureState state = 6; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + /** + * required .ProcedureState state = 6; + * + *
+       * internal "runtime" state
+       * 
+ */ + public boolean hasState() { + return ((bitField0_ & 0x00000020) == 0x00000020); + } + /** + * required .ProcedureState state = 6; + * + *
+       * internal "runtime" state
+       * 
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState getState() { + return state_; + } + /** + * required .ProcedureState state = 6; + * + *
+       * internal "runtime" state
+       * 
+ */ + public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000020; + state_ = value; + onChanged(); + return this; + } + /** + * required .ProcedureState state = 6; + * + *
+       * internal "runtime" state
+       * 
+ */ + public Builder clearState() { + bitField0_ = (bitField0_ & ~0x00000020); + state_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState.INITIALIZING; + onChanged(); + return this; + } + + // repeated uint32 stack_id = 7; + private java.util.List stackId_ = java.util.Collections.emptyList(); + private void ensureStackIdIsMutable() { + if (!((bitField0_ & 0x00000040) == 0x00000040)) { + stackId_ = new java.util.ArrayList(stackId_); + bitField0_ |= 0x00000040; + } + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public java.util.List + getStackIdList() { + return java.util.Collections.unmodifiableList(stackId_); + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public int getStackIdCount() { + return stackId_.size(); + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public int getStackId(int index) { + return stackId_.get(index); + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public Builder setStackId( + int index, int value) { + ensureStackIdIsMutable(); + stackId_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public Builder addStackId(int value) { + ensureStackIdIsMutable(); + stackId_.add(value); + onChanged(); + return this; + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public Builder addAllStackId( + java.lang.Iterable values) { + ensureStackIdIsMutable(); + super.addAll(values, stackId_); + onChanged(); + return this; + } + /** + * repeated uint32 stack_id = 7; + * + *
+       * stack indices in case the procedure was running
+       * 
+ */ + public Builder clearStackId() { + stackId_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000040); + onChanged(); + return this; + } + + // required uint64 last_update = 8; + private long lastUpdate_ ; + /** + * required uint64 last_update = 8; + */ + public boolean hasLastUpdate() { + return ((bitField0_ & 0x00000080) == 0x00000080); + } + /** + * required uint64 last_update = 8; + */ + public long getLastUpdate() { + return lastUpdate_; + } + /** + * required uint64 last_update = 8; + */ + public Builder setLastUpdate(long value) { + bitField0_ |= 0x00000080; + lastUpdate_ = value; + onChanged(); + return this; + } + /** + * required uint64 last_update = 8; + */ + public Builder clearLastUpdate() { + bitField0_ = (bitField0_ & ~0x00000080); + lastUpdate_ = 0L; + onChanged(); + return this; + } + + // optional uint32 timeout = 9; + private int timeout_ ; + /** + * optional uint32 timeout = 9; + */ + public boolean hasTimeout() { + return ((bitField0_ & 0x00000100) == 0x00000100); + } + /** + * optional uint32 timeout = 9; + */ + public int getTimeout() { + return timeout_; + } + /** + * optional uint32 timeout = 9; + */ + public Builder setTimeout(int value) { + bitField0_ |= 0x00000100; + timeout_ = value; + onChanged(); + return this; + } + /** + * optional uint32 timeout = 9; + */ + public Builder clearTimeout() { + bitField0_ = (bitField0_ & ~0x00000100); + timeout_ = 0; + onChanged(); + return this; + } + + // optional .ForeignExceptionMessage exception = 10; + private org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> exceptionBuilder_; + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * 
+ */ + public boolean hasException() { + return ((bitField0_ & 0x00000200) == 0x00000200); + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage getException() { + if (exceptionBuilder_ == null) { + return exception_; + } else { + return exceptionBuilder_.getMessage(); + } + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public Builder setException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + exception_ = value; + onChanged(); + } else { + exceptionBuilder_.setMessage(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public Builder setException( + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder builderForValue) { + if (exceptionBuilder_ == null) { + exception_ = builderForValue.build(); + onChanged(); + } else { + exceptionBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public Builder mergeException(org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage value) { + if (exceptionBuilder_ == null) { + if (((bitField0_ & 0x00000200) == 0x00000200) && + exception_ != org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance()) { + exception_ = + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.newBuilder(exception_).mergeFrom(value).buildPartial(); + } else { + exception_ = value; + } + onChanged(); + } else { + exceptionBuilder_.mergeFrom(value); + } + bitField0_ |= 0x00000200; + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public Builder clearException() { + if (exceptionBuilder_ == null) { + exception_ = org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.getDefaultInstance(); + onChanged(); + } else { + exceptionBuilder_.clear(); + } + bitField0_ = (bitField0_ & ~0x00000200); + return this; + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder getExceptionBuilder() { + bitField0_ |= 0x00000200; + onChanged(); + return getExceptionFieldBuilder().getBuilder(); + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + public org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder getExceptionOrBuilder() { + if (exceptionBuilder_ != null) { + return exceptionBuilder_.getMessageOrBuilder(); + } else { + return exception_; + } + } + /** + * optional .ForeignExceptionMessage exception = 10; + * + *
+       * user state/results
+       * </pre>
+ */ + private com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder> + getExceptionFieldBuilder() { + if (exceptionBuilder_ == null) { + exceptionBuilder_ = new com.google.protobuf.SingleFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage.Builder, org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessageOrBuilder>( + exception_, + getParentForChildren(), + isClean()); + exception_ = null; + } + return exceptionBuilder_; + } + + // optional bytes result = 11; + private com.google.protobuf.ByteString result_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes result = 11; + * + *
+       * opaque (user) result structure
+       * </pre>
+ */ + public boolean hasResult() { + return ((bitField0_ & 0x00000400) == 0x00000400); + } + /** + * optional bytes result = 11; + * + *
+       * opaque (user) result structure
+       * </pre>
+ */ + public com.google.protobuf.ByteString getResult() { + return result_; + } + /** + * optional bytes result = 11; + * + *
+       * opaque (user) result structure
+       * </pre>
+ */ + public Builder setResult(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000400; + result_ = value; + onChanged(); + return this; + } + /** + * optional bytes result = 11; + * + *
+       * opaque (user) result structure
+       * </pre>
+ */ + public Builder clearResult() { + bitField0_ = (bitField0_ & ~0x00000400); + result_ = getDefaultInstance().getResult(); + onChanged(); + return this; + } + + // optional bytes state_data = 12; + private com.google.protobuf.ByteString stateData_ = com.google.protobuf.ByteString.EMPTY; + /** + * optional bytes state_data = 12; + * + *
+       * opaque (user) procedure internal-state
+       * </pre>
+ */ + public boolean hasStateData() { + return ((bitField0_ & 0x00000800) == 0x00000800); + } + /** + * optional bytes state_data = 12; + * + *
+       * opaque (user) procedure internal-state
+       * </pre>
+ */ + public com.google.protobuf.ByteString getStateData() { + return stateData_; + } + /** + * optional bytes state_data = 12; + * + *
+       * opaque (user) procedure internal-state
+       * </pre>
+ */ + public Builder setStateData(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000800; + stateData_ = value; + onChanged(); + return this; + } + /** + * optional bytes state_data = 12; + * + *
+       * opaque (user) procedure internal-state
+       * </pre>
+ */ + public Builder clearStateData() { + bitField0_ = (bitField0_ & ~0x00000800); + stateData_ = getDefaultInstance().getStateData(); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:Procedure) + } + + static { + defaultInstance = new Procedure(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:Procedure) + } + + public interface SequentialProcedureDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required bool executed = 1; + /** + * required bool executed = 1; + */ + boolean hasExecuted(); + /** + * required bool executed = 1; + */ + boolean getExecuted(); + } + /** + * Protobuf type {@code SequentialProcedureData} + * + *
+   **
+   * SequentialProcedure data
+   * </pre>
+ */ + public static final class SequentialProcedureData extends + com.google.protobuf.GeneratedMessage + implements SequentialProcedureDataOrBuilder { + // Use SequentialProcedureData.newBuilder() to construct. + private SequentialProcedureData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private SequentialProcedureData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SequentialProcedureData defaultInstance; + public static SequentialProcedureData getDefaultInstance() { + return defaultInstance; + } + + public SequentialProcedureData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private SequentialProcedureData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + executed_ = input.readBool(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SequentialProcedureData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SequentialProcedureData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required bool executed = 1; + public static final int EXECUTED_FIELD_NUMBER = 1; + private boolean executed_; + /** + * required bool executed = 1; + */ + public boolean hasExecuted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool executed = 1; + */ + public boolean getExecuted() { + return 
executed_; + } + + private void initFields() { + executed_ = false; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasExecuted()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBool(1, executed_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBoolSize(1, executed_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) obj; + + boolean result = true; + result = result && (hasExecuted() == other.hasExecuted()); + if (hasExecuted()) { + result = result && (getExecuted() + == other.getExecuted()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasExecuted()) { + hash = (37 * hash) + EXECUTED_FIELD_NUMBER; + hash = (53 * hash) + hashBoolean(getExecuted()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code SequentialProcedureData} + * + *
+     **
+     * SequentialProcedure data
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + executed_ = false; + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_SequentialProcedureData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.executed_ = executed_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData.getDefaultInstance()) return this; + if (other.hasExecuted()) { + 
setExecuted(other.getExecuted()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasExecuted()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.SequentialProcedureData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required bool executed = 1; + private boolean executed_ ; + /** + * required bool executed = 1; + */ + public boolean hasExecuted() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required bool executed = 1; + */ + public boolean getExecuted() { + return executed_; + } + /** + * required bool executed = 1; + */ + public Builder setExecuted(boolean value) { + bitField0_ |= 0x00000001; + executed_ = value; + onChanged(); + return this; + } + /** + * required bool executed = 1; + */ + public Builder clearExecuted() { + bitField0_ = (bitField0_ & ~0x00000001); + executed_ = false; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:SequentialProcedureData) + } + + static { + defaultInstance = new SequentialProcedureData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:SequentialProcedureData) + } + + public interface StateMachineProcedureDataOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated uint32 state = 1; + /** + * repeated uint32 state = 1; + */ + java.util.List getStateList(); + /** + * repeated uint32 state = 1; + */ + int getStateCount(); + /** + * repeated uint32 state = 1; + */ + int getState(int index); + } + /** + * Protobuf type {@code StateMachineProcedureData} + * + *
+   **
+   * StateMachineProcedure data
+   * </pre>
+ */ + public static final class StateMachineProcedureData extends + com.google.protobuf.GeneratedMessage + implements StateMachineProcedureDataOrBuilder { + // Use StateMachineProcedureData.newBuilder() to construct. + private StateMachineProcedureData(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private StateMachineProcedureData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final StateMachineProcedureData defaultInstance; + public static StateMachineProcedureData getDefaultInstance() { + return defaultInstance; + } + + public StateMachineProcedureData getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private StateMachineProcedureData( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + state_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + state_.add(input.readUInt32()); + break; + } + case 10: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001) && input.getBytesUntilLimit() > 0) { + state_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + while (input.getBytesUntilLimit() > 0) { + state_.add(input.readUInt32()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + state_ = java.util.Collections.unmodifiableList(state_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public StateMachineProcedureData parsePartialFrom( + com.google.protobuf.CodedInputStream input, + 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new StateMachineProcedureData(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated uint32 state = 1; + public static final int STATE_FIELD_NUMBER = 1; + private java.util.List state_; + /** + * repeated uint32 state = 1; + */ + public java.util.List + getStateList() { + return state_; + } + /** + * repeated uint32 state = 1; + */ + public int getStateCount() { + return state_.size(); + } + /** + * repeated uint32 state = 1; + */ + public int getState(int index) { + return state_.get(index); + } + + private void initFields() { + state_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < state_.size(); i++) { + output.writeUInt32(1, state_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + { + int dataSize = 0; + for (int i = 0; i < state_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt32SizeNoTag(state_.get(i)); + } + size += dataSize; + size += 1 * getStateList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData) obj; + + boolean result = true; + result = result && getStateList() + .equals(other.getStateList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getStateCount() > 0) { + hash = (37 * hash) + STATE_FIELD_NUMBER; + hash = (53 * hash) + getStateList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code StateMachineProcedureData} + * + *
+     **
+     * StateMachineProcedure data
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureDataOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + state_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_StateMachineProcedureData_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData(this); + int from_bitField0_ = bitField0_; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + state_ = java.util.Collections.unmodifiableList(state_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.state_ = state_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData.getDefaultInstance()) return this; + if (!other.state_.isEmpty()) { + if (state_.isEmpty()) { + state_ = other.state_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureStateIsMutable(); + state_.addAll(other.state_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.StateMachineProcedureData) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated uint32 state = 1; + private java.util.List state_ = java.util.Collections.emptyList(); + private void ensureStateIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + state_ = new java.util.ArrayList(state_); + bitField0_ |= 0x00000001; + } + } + /** + * repeated uint32 state = 1; + */ + public java.util.List + getStateList() { + return java.util.Collections.unmodifiableList(state_); + } + /** + * repeated uint32 state = 1; + */ + public int getStateCount() { + return state_.size(); + } + /** + * repeated uint32 state = 1; + */ + public int getState(int index) { + return state_.get(index); + } + /** + * repeated uint32 state = 1; + */ + public Builder setState( + int index, int value) { + ensureStateIsMutable(); + state_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint32 state = 1; + */ + public Builder addState(int value) { + ensureStateIsMutable(); + state_.add(value); + onChanged(); + return this; + } + /** + * repeated uint32 state = 1; + */ + public Builder addAllState( + java.lang.Iterable values) { + ensureStateIsMutable(); + super.addAll(values, state_); + onChanged(); + return this; + } + /** + * repeated uint32 state = 1; + */ + public Builder clearState() { + state_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:StateMachineProcedureData) + } + + static { + defaultInstance = new StateMachineProcedureData(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:StateMachineProcedureData) + } + + public interface ProcedureWALHeaderOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 version = 1; + /** + * required uint32 version = 1; + */ + boolean hasVersion(); + /** + * required uint32 version = 1; + */ + int getVersion(); + + // required uint32 type = 2; + /** + * required uint32 type = 2; + */ + boolean hasType(); + /** + * required uint32 type = 2; + */ + int getType(); + + // required uint64 log_id = 3; + /** + * required uint64 log_id = 3; + */ + boolean hasLogId(); + /** + * required uint64 log_id = 3; + */ + long getLogId(); + + // required uint64 min_proc_id = 4; + /** + * required uint64 min_proc_id = 4; + */ + boolean hasMinProcId(); + /** + * required uint64 min_proc_id = 4; + */ + long getMinProcId(); 
+ } + /** + * Protobuf type {@code ProcedureWALHeader} + * + *
+   **
+   * Procedure WAL header
+   * </pre>
+ */ + public static final class ProcedureWALHeader extends + com.google.protobuf.GeneratedMessage + implements ProcedureWALHeaderOrBuilder { + // Use ProcedureWALHeader.newBuilder() to construct. + private ProcedureWALHeader(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureWALHeader(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureWALHeader defaultInstance; + public static ProcedureWALHeader getDefaultInstance() { + return defaultInstance; + } + + public ProcedureWALHeader getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureWALHeader( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + version_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + type_ = input.readUInt32(); + break; + } + case 24: { + bitField0_ |= 0x00000004; + logId_ = input.readUInt64(); + break; + } + case 32: { + bitField0_ |= 0x00000008; + minProcId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureWALHeader parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureWALHeader(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint32 version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private int version_; + /** + * required uint32 version = 1; + */ + 
public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int getVersion() { + return version_; + } + + // required uint32 type = 2; + public static final int TYPE_FIELD_NUMBER = 2; + private int type_; + /** + * required uint32 type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 type = 2; + */ + public int getType() { + return type_; + } + + // required uint64 log_id = 3; + public static final int LOG_ID_FIELD_NUMBER = 3; + private long logId_; + /** + * required uint64 log_id = 3; + */ + public boolean hasLogId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 log_id = 3; + */ + public long getLogId() { + return logId_; + } + + // required uint64 min_proc_id = 4; + public static final int MIN_PROC_ID_FIELD_NUMBER = 4; + private long minProcId_; + /** + * required uint64 min_proc_id = 4; + */ + public boolean hasMinProcId() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 min_proc_id = 4; + */ + public long getMinProcId() { + return minProcId_; + } + + private void initFields() { + version_ = 0; + type_ = 0; + logId_ = 0L; + minProcId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasLogId()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasMinProcId()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt32(2, type_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeUInt64(3, logId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeUInt64(4, minProcId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(2, type_); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, logId_); + } + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(4, minProcId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); + } + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && (getType() + == other.getType()); + } + result = result && (hasLogId() == other.hasLogId()); + if (hasLogId()) { + result = result && (getLogId() + == other.getLogId()); + } + result = result && (hasMinProcId() == other.hasMinProcId()); + if (hasMinProcId()) { + result = result && (getMinProcId() + == other.getMinProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + } + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + getType(); + } + if (hasLogId()) { + hash = (37 * hash) + LOG_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getLogId()); + } + if (hasMinProcId()) { + hash = (37 * hash) + MIN_PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getMinProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureWALHeader} + * + *
+     **
+     * Procedure WAL header
+     * </pre>
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeaderOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + type_ = 0; + bitField0_ = (bitField0_ & ~0x00000002); + logId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + minProcId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000008); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALHeader_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.type_ = type_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000004; + } + result.logId_ = logId_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { + to_bitField0_ |= 0x00000008; + } + result.minProcId_ = minProcId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader) { + 
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader.getDefaultInstance()) return this; + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + if (other.hasType()) { + setType(other.getType()); + } + if (other.hasLogId()) { + setLogId(other.getLogId()); + } + if (other.hasMinProcId()) { + setMinProcId(other.getMinProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasType()) { + + return false; + } + if (!hasLogId()) { + + return false; + } + if (!hasMinProcId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALHeader) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint32 version = 1; + private int version_ ; + /** + * required uint32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int getVersion() { + return version_; + } + /** + * required uint32 version = 1; + */ + public Builder setVersion(int value) { + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required uint32 version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = 0; + onChanged(); + return this; + } + + // required uint32 type = 2; + private int type_ ; + /** + * required uint32 type = 2; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint32 type = 2; + */ + public int getType() { + return type_; + } + /** + * required uint32 type = 2; + */ + public Builder setType(int value) { + bitField0_ |= 0x00000002; + type_ = value; + onChanged(); + return this; + } + /** + * required uint32 type = 2; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000002); + type_ = 0; + onChanged(); + return this; + } + + // required uint64 log_id = 3; + private long logId_ ; + /** + * required uint64 log_id = 3; + */ + public boolean hasLogId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * required uint64 log_id = 3; + */ + public long getLogId() { + return logId_; + } + /** + * required uint64 log_id = 3; + */ + public Builder setLogId(long value) { + bitField0_ |= 0x00000004; + logId_ = value; + onChanged(); + return this; + } + /** + * required uint64 log_id = 3; + */ + public Builder clearLogId() { + bitField0_ = (bitField0_ & ~0x00000004); + logId_ = 0L; + onChanged(); + return this; + } + + // required uint64 min_proc_id = 4; + private long minProcId_ ; + /** + * 
required uint64 min_proc_id = 4; + */ + public boolean hasMinProcId() { + return ((bitField0_ & 0x00000008) == 0x00000008); + } + /** + * required uint64 min_proc_id = 4; + */ + public long getMinProcId() { + return minProcId_; + } + /** + * required uint64 min_proc_id = 4; + */ + public Builder setMinProcId(long value) { + bitField0_ |= 0x00000008; + minProcId_ = value; + onChanged(); + return this; + } + /** + * required uint64 min_proc_id = 4; + */ + public Builder clearMinProcId() { + bitField0_ = (bitField0_ & ~0x00000008); + minProcId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureWALHeader) + } + + static { + defaultInstance = new ProcedureWALHeader(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureWALHeader) + } + + public interface ProcedureWALTrailerOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint32 version = 1; + /** + * required uint32 version = 1; + */ + boolean hasVersion(); + /** + * required uint32 version = 1; + */ + int getVersion(); + + // required uint64 tracker_pos = 2; + /** + * required uint64 tracker_pos = 2; + */ + boolean hasTrackerPos(); + /** + * required uint64 tracker_pos = 2; + */ + long getTrackerPos(); + } + /** + * Protobuf type {@code ProcedureWALTrailer} + * + *
+   * &lt;pre&gt;
+   **
+   * Procedure WAL trailer
+   * &lt;/pre&gt;
+ */ + public static final class ProcedureWALTrailer extends + com.google.protobuf.GeneratedMessage + implements ProcedureWALTrailerOrBuilder { + // Use ProcedureWALTrailer.newBuilder() to construct. + private ProcedureWALTrailer(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureWALTrailer(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureWALTrailer defaultInstance; + public static ProcedureWALTrailer getDefaultInstance() { + return defaultInstance; + } + + public ProcedureWALTrailer getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureWALTrailer( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + version_ = input.readUInt32(); + break; + } + case 16: { + bitField0_ |= 0x00000002; + trackerPos_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureWALTrailer parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureWALTrailer(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint32 version = 1; + public static final int VERSION_FIELD_NUMBER = 1; + private int version_; + /** + * required uint32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int 
getVersion() { + return version_; + } + + // required uint64 tracker_pos = 2; + public static final int TRACKER_POS_FIELD_NUMBER = 2; + private long trackerPos_; + /** + * required uint64 tracker_pos = 2; + */ + public boolean hasTrackerPos() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 tracker_pos = 2; + */ + public long getTrackerPos() { + return trackerPos_; + } + + private void initFields() { + version_ = 0; + trackerPos_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasVersion()) { + memoizedIsInitialized = 0; + return false; + } + if (!hasTrackerPos()) { + memoizedIsInitialized = 0; + return false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt32(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(2, trackerPos_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt32Size(1, version_); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(2, trackerPos_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer) obj; + + boolean result = true; + result = result && (hasVersion() == other.hasVersion()); + if (hasVersion()) { + result = result && (getVersion() + == other.getVersion()); + } + result = result && (hasTrackerPos() == other.hasTrackerPos()); + if (hasTrackerPos()) { + result = result && (getTrackerPos() + == other.getTrackerPos()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasVersion()) { + hash = (37 * hash) + VERSION_FIELD_NUMBER; + hash = (53 * hash) + getVersion(); + } + if (hasTrackerPos()) { + hash = (37 * hash) + TRACKER_POS_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getTrackerPos()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.ByteString data) + throws 
com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureWALTrailer} + * + *
+     * &lt;pre&gt;
+     **
+     * Procedure WAL trailer
+     * &lt;/pre&gt;
+ */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + version_ = 0; + bitField0_ = (bitField0_ & ~0x00000001); + trackerPos_ = 0L; + bitField0_ = (bitField0_ & ~0x00000002); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALTrailer_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { + to_bitField0_ |= 0x00000002; + } + result.trackerPos_ = trackerPos_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer other) { + if (other == 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer.getDefaultInstance()) return this; + if (other.hasVersion()) { + setVersion(other.getVersion()); + } + if (other.hasTrackerPos()) { + setTrackerPos(other.getTrackerPos()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasVersion()) { + + return false; + } + if (!hasTrackerPos()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALTrailer) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint32 version = 1; + private int version_ ; + /** + * required uint32 version = 1; + */ + public boolean hasVersion() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint32 version = 1; + */ + public int getVersion() { + return version_; + } + /** + * required uint32 version = 1; + */ + public Builder setVersion(int value) { + bitField0_ |= 0x00000001; + version_ = value; + onChanged(); + return this; + } + /** + * required uint32 version = 1; + */ + public Builder clearVersion() { + bitField0_ = (bitField0_ & ~0x00000001); + version_ = 0; + onChanged(); + return this; + } + + // required uint64 tracker_pos = 2; + private long trackerPos_ ; + /** + * required uint64 tracker_pos = 2; + */ + public boolean hasTrackerPos() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * required uint64 tracker_pos = 2; + */ + public long getTrackerPos() { + return trackerPos_; + } + /** + * required uint64 tracker_pos = 2; + */ + public Builder setTrackerPos(long value) { + bitField0_ |= 0x00000002; + trackerPos_ = value; + onChanged(); + return this; + } + /** + * required uint64 tracker_pos = 2; + */ + public Builder clearTrackerPos() { + bitField0_ = (bitField0_ & ~0x00000002); + trackerPos_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureWALTrailer) + } + + static { + defaultInstance = new ProcedureWALTrailer(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureWALTrailer) + } + + public interface ProcedureStoreTrackerOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // repeated .ProcedureStoreTracker.TrackerNode node = 1; + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + java.util.List + getNodeList(); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getNode(int index); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + int getNodeCount(); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + java.util.List + getNodeOrBuilderList(); + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder getNodeOrBuilder( + int 
index); + } + /** + * Protobuf type {@code ProcedureStoreTracker} + */ + public static final class ProcedureStoreTracker extends + com.google.protobuf.GeneratedMessage + implements ProcedureStoreTrackerOrBuilder { + // Use ProcedureStoreTracker.newBuilder() to construct. + private ProcedureStoreTracker(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureStoreTracker(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureStoreTracker defaultInstance; + public static ProcedureStoreTracker getDefaultInstance() { + return defaultInstance; + } + + public ProcedureStoreTracker getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureStoreTracker( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + node_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; + } + node_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.PARSER, extensionRegistry)); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + node_ = java.util.Collections.unmodifiableList(node_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureStoreTracker parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new ProcedureStoreTracker(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + 
return PARSER; + } + + public interface TrackerNodeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required uint64 start_id = 1; + /** + * required uint64 start_id = 1; + */ + boolean hasStartId(); + /** + * required uint64 start_id = 1; + */ + long getStartId(); + + // repeated uint64 updated = 2; + /** + * repeated uint64 updated = 2; + */ + java.util.List getUpdatedList(); + /** + * repeated uint64 updated = 2; + */ + int getUpdatedCount(); + /** + * repeated uint64 updated = 2; + */ + long getUpdated(int index); + + // repeated uint64 deleted = 3; + /** + * repeated uint64 deleted = 3; + */ + java.util.List getDeletedList(); + /** + * repeated uint64 deleted = 3; + */ + int getDeletedCount(); + /** + * repeated uint64 deleted = 3; + */ + long getDeleted(int index); + } + /** + * Protobuf type {@code ProcedureStoreTracker.TrackerNode} + */ + public static final class TrackerNode extends + com.google.protobuf.GeneratedMessage + implements TrackerNodeOrBuilder { + // Use TrackerNode.newBuilder() to construct. + private TrackerNode(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private TrackerNode(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TrackerNode defaultInstance; + public static TrackerNode getDefaultInstance() { + return defaultInstance; + } + + public TrackerNode getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private TrackerNode( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + bitField0_ |= 0x00000001; + startId_ = input.readUInt64(); + break; + } + case 16: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + updated_.add(input.readUInt64()); + break; + } + case 18: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002) && input.getBytesUntilLimit() > 0) { + updated_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + while (input.getBytesUntilLimit() > 0) { + updated_.add(input.readUInt64()); + } + input.popLimit(limit); + break; + } + case 24: { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + deleted_.add(input.readUInt64()); + break; + } + case 26: { + int length = input.readRawVarint32(); + int limit = input.pushLimit(length); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) { + deleted_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000004; + } + while (input.getBytesUntilLimit() > 0) { + 
deleted_.add(input.readUInt64()); + } + input.popLimit(limit); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = java.util.Collections.unmodifiableList(updated_); + } + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = java.util.Collections.unmodifiableList(deleted_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TrackerNode parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TrackerNode(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private int bitField0_; + // required uint64 start_id = 1; + public static final int START_ID_FIELD_NUMBER = 1; + private long startId_; + /** + * required uint64 start_id = 1; + */ + public boolean hasStartId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 start_id = 1; + */ + public long getStartId() { + return startId_; + } + + // repeated uint64 updated = 2; + public static final int UPDATED_FIELD_NUMBER = 2; + private java.util.List updated_; + /** + * repeated uint64 updated = 2; + */ + public java.util.List + getUpdatedList() { + return updated_; + } + /** + * repeated uint64 updated = 2; + */ + public int getUpdatedCount() { + return updated_.size(); + } + /** + * repeated uint64 updated = 2; + */ + public long getUpdated(int index) { + return updated_.get(index); + } + + // repeated uint64 deleted = 3; + public static final int DELETED_FIELD_NUMBER = 3; + private java.util.List deleted_; + /** + * repeated uint64 deleted = 3; + */ + public java.util.List + getDeletedList() { + return deleted_; + } + /** + * repeated uint64 deleted = 3; + */ + public int getDeletedCount() { + return deleted_.size(); + } + /** + * repeated uint64 deleted = 3; + */ + public long getDeleted(int index) { + return deleted_.get(index); + } + + private void initFields() { + startId_ = 0L; + updated_ = java.util.Collections.emptyList(); + deleted_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasStartId()) { + memoizedIsInitialized = 0; + return 
false; + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeUInt64(1, startId_); + } + for (int i = 0; i < updated_.size(); i++) { + output.writeUInt64(2, updated_.get(i)); + } + for (int i = 0; i < deleted_.size(); i++) { + output.writeUInt64(3, deleted_.get(i)); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(1, startId_); + } + { + int dataSize = 0; + for (int i = 0; i < updated_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt64SizeNoTag(updated_.get(i)); + } + size += dataSize; + size += 1 * getUpdatedList().size(); + } + { + int dataSize = 0; + for (int i = 0; i < deleted_.size(); i++) { + dataSize += com.google.protobuf.CodedOutputStream + .computeUInt64SizeNoTag(deleted_.get(i)); + } + size += dataSize; + size += 1 * getDeletedList().size(); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode) obj; + + boolean result = true; + result = result && (hasStartId() == other.hasStartId()); + if (hasStartId()) { + result = result && (getStartId() + == other.getStartId()); + } + result = result && getUpdatedList() + .equals(other.getUpdatedList()); + result = result && getDeletedList() + .equals(other.getDeletedList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasStartId()) { + hash = (37 * hash) + START_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getStartId()); + } + if (getUpdatedCount() > 0) { + hash = (37 * hash) + UPDATED_FIELD_NUMBER; + hash = (53 * hash) + getUpdatedList().hashCode(); + } + if (getDeletedCount() > 0) { + hash = (37 * hash) + DELETED_FIELD_NUMBER; + hash = (53 * hash) + getDeletedList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + 
com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureStoreTracker.TrackerNode} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + 
return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + startId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000001); + updated_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + deleted_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.startId_ = startId_; + if (((bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = java.util.Collections.unmodifiableList(updated_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.updated_ = updated_; + if (((bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = java.util.Collections.unmodifiableList(deleted_); + bitField0_ = (bitField0_ & ~0x00000004); + } + result.deleted_ = deleted_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder 
mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance()) return this; + if (other.hasStartId()) { + setStartId(other.getStartId()); + } + if (!other.updated_.isEmpty()) { + if (updated_.isEmpty()) { + updated_ = other.updated_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureUpdatedIsMutable(); + updated_.addAll(other.updated_); + } + onChanged(); + } + if (!other.deleted_.isEmpty()) { + if (deleted_.isEmpty()) { + deleted_ = other.deleted_; + bitField0_ = (bitField0_ & ~0x00000004); + } else { + ensureDeletedIsMutable(); + deleted_.addAll(other.deleted_); + } + onChanged(); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasStartId()) { + + return false; + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required uint64 start_id = 1; + private long startId_ ; + /** + * required uint64 start_id = 1; + */ + public boolean hasStartId() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required uint64 start_id = 1; + */ + public long getStartId() { + return startId_; + } + /** + * required uint64 start_id = 1; + */ + public Builder setStartId(long value) { + bitField0_ |= 0x00000001; + startId_ = value; + onChanged(); + return this; + } + /** + * required uint64 start_id = 1; + */ + public Builder clearStartId() { + bitField0_ = (bitField0_ & ~0x00000001); + startId_ = 0L; + onChanged(); + return this; + } + + // repeated uint64 updated = 2; + private java.util.List updated_ = java.util.Collections.emptyList(); + private void ensureUpdatedIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + updated_ = new java.util.ArrayList(updated_); + bitField0_ |= 0x00000002; + } + } + /** + * repeated uint64 updated = 2; + */ + public java.util.List + getUpdatedList() { + return java.util.Collections.unmodifiableList(updated_); + } + /** + * repeated uint64 updated = 2; + */ + public int getUpdatedCount() { + return updated_.size(); + } + /** + * repeated uint64 updated = 2; + */ + public long getUpdated(int index) { + return updated_.get(index); + } + /** + * repeated uint64 updated = 2; + */ + public Builder setUpdated( + int index, long value) { + ensureUpdatedIsMutable(); + updated_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint64 updated = 2; + */ + public Builder addUpdated(long value) { + ensureUpdatedIsMutable(); + updated_.add(value); + onChanged(); + return this; + } + /** + * repeated uint64 updated = 2; + */ + public Builder addAllUpdated( + java.lang.Iterable values) { + ensureUpdatedIsMutable(); + super.addAll(values, updated_); + onChanged(); + return this; + } + /** + * repeated 
uint64 updated = 2; + */ + public Builder clearUpdated() { + updated_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + // repeated uint64 deleted = 3; + private java.util.List deleted_ = java.util.Collections.emptyList(); + private void ensureDeletedIsMutable() { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + deleted_ = new java.util.ArrayList(deleted_); + bitField0_ |= 0x00000004; + } + } + /** + * repeated uint64 deleted = 3; + */ + public java.util.List + getDeletedList() { + return java.util.Collections.unmodifiableList(deleted_); + } + /** + * repeated uint64 deleted = 3; + */ + public int getDeletedCount() { + return deleted_.size(); + } + /** + * repeated uint64 deleted = 3; + */ + public long getDeleted(int index) { + return deleted_.get(index); + } + /** + * repeated uint64 deleted = 3; + */ + public Builder setDeleted( + int index, long value) { + ensureDeletedIsMutable(); + deleted_.set(index, value); + onChanged(); + return this; + } + /** + * repeated uint64 deleted = 3; + */ + public Builder addDeleted(long value) { + ensureDeletedIsMutable(); + deleted_.add(value); + onChanged(); + return this; + } + /** + * repeated uint64 deleted = 3; + */ + public Builder addAllDeleted( + java.lang.Iterable values) { + ensureDeletedIsMutable(); + super.addAll(values, deleted_); + onChanged(); + return this; + } + /** + * repeated uint64 deleted = 3; + */ + public Builder clearDeleted() { + deleted_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureStoreTracker.TrackerNode) + } + + static { + defaultInstance = new TrackerNode(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureStoreTracker.TrackerNode) + } + + // repeated .ProcedureStoreTracker.TrackerNode node = 1; + public static final int NODE_FIELD_NUMBER = 1; + private java.util.List node_; + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List getNodeList() { + return node_; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List + getNodeOrBuilderList() { + return node_; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public int getNodeCount() { + return node_.size(); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getNode(int index) { + return node_.get(index); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder getNodeOrBuilder( + int index) { + return node_.get(index); + } + + private void initFields() { + node_ = java.util.Collections.emptyList(); + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + for (int i = 0; i < getNodeCount(); i++) { + if (!getNode(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + for (int i = 0; i < node_.size(); i++) { + output.writeMessage(1, node_.get(i)); + } + 
getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + for (int i = 0; i < node_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(1, node_.get(i)); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker) obj; + + boolean result = true; + result = result && getNodeList() + .equals(other.getNodeList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (getNodeCount() > 0) { + hash = (37 * hash) + NODE_FIELD_NUMBER; + hash = (53 * hash) + getNodeList().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureStoreTracker} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTrackerOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getNodeFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + if (nodeBuilder_ == null) { + node_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + } else { + nodeBuilder_.clear(); + } + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureStoreTracker_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker getDefaultInstanceForType() { + return 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker(this); + int from_bitField0_ = bitField0_; + if (nodeBuilder_ == null) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { + node_ = java.util.Collections.unmodifiableList(node_); + bitField0_ = (bitField0_ & ~0x00000001); + } + result.node_ = node_; + } else { + result.node_ = nodeBuilder_.build(); + } + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.getDefaultInstance()) return this; + if (nodeBuilder_ == null) { + if (!other.node_.isEmpty()) { + if (node_.isEmpty()) { + node_ = other.node_; + bitField0_ = (bitField0_ & ~0x00000001); + } else { + ensureNodeIsMutable(); + node_.addAll(other.node_); + } + onChanged(); + } + } else { + if (!other.node_.isEmpty()) { + if (nodeBuilder_.isEmpty()) { + nodeBuilder_.dispose(); + nodeBuilder_ = null; + node_ = other.node_; + bitField0_ = (bitField0_ & ~0x00000001); + nodeBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getNodeFieldBuilder() : null; + } else { + nodeBuilder_.addAllMessages(other.node_); + } + } + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + for (int i = 0; i < getNodeCount(); i++) { + if (!getNode(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // repeated .ProcedureStoreTracker.TrackerNode node = 1; + private java.util.List node_ = + java.util.Collections.emptyList(); + private void ensureNodeIsMutable() { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { + node_ = new java.util.ArrayList(node_); + bitField0_ |= 0x00000001; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder> nodeBuilder_; + + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List getNodeList() { + if (nodeBuilder_ == null) { + return java.util.Collections.unmodifiableList(node_); + } else { + return nodeBuilder_.getMessageList(); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public int getNodeCount() { + if (nodeBuilder_ == null) { + return node_.size(); + } else { + return nodeBuilder_.getCount(); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode getNode(int index) { + if (nodeBuilder_ == null) { + return node_.get(index); + } else { + return nodeBuilder_.getMessage(index); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder setNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodeIsMutable(); + node_.set(index, value); + onChanged(); + } else { + nodeBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder setNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builderForValue) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.set(index, builderForValue.build()); + onChanged(); + } else { + nodeBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode value) { + if 
(nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodeIsMutable(); + node_.add(value); + onChanged(); + } else { + nodeBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode value) { + if (nodeBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureNodeIsMutable(); + node_.add(index, value); + onChanged(); + } else { + nodeBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builderForValue) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.add(builderForValue.build()); + onChanged(); + } else { + nodeBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addNode( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builderForValue) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.add(index, builderForValue.build()); + onChanged(); + } else { + nodeBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder addAllNode( + java.lang.Iterable values) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + super.addAll(values, node_); + onChanged(); + } else { + nodeBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder clearNode() { + if (nodeBuilder_ == null) { + node_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + } else { + nodeBuilder_.clear(); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public Builder removeNode(int index) { + if (nodeBuilder_ == null) { + ensureNodeIsMutable(); + node_.remove(index); + onChanged(); + } else { + nodeBuilder_.remove(index); + } + return this; + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder getNodeBuilder( + int index) { + return getNodeFieldBuilder().getBuilder(index); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder getNodeOrBuilder( + int index) { + if (nodeBuilder_ == null) { + return node_.get(index); } else { + return nodeBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List + getNodeOrBuilderList() { + if (nodeBuilder_ != null) { + return nodeBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(node_); + } + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder addNodeBuilder() { + return getNodeFieldBuilder().addBuilder( + 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance()); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder addNodeBuilder( + int index) { + return getNodeFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.getDefaultInstance()); + } + /** + * repeated .ProcedureStoreTracker.TrackerNode node = 1; + */ + public java.util.List + getNodeBuilderList() { + return getNodeFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder> + getNodeFieldBuilder() { + if (nodeBuilder_ == null) { + nodeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNodeOrBuilder>( + node_, + ((bitField0_ & 0x00000001) == 0x00000001), + getParentForChildren(), + isClean()); + node_ = null; + } + return nodeBuilder_; + } + + // @@protoc_insertion_point(builder_scope:ProcedureStoreTracker) + } + + static { + defaultInstance = new ProcedureStoreTracker(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureStoreTracker) + } + + public interface ProcedureWALEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + + // required .ProcedureWALEntry.Type type = 1; + /** + * required .ProcedureWALEntry.Type type = 1; + */ + boolean hasType(); + /** + * required .ProcedureWALEntry.Type type = 1; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type getType(); + + // repeated .Procedure procedure = 2; + /** + * repeated .Procedure procedure = 2; + */ + java.util.List + getProcedureList(); + /** + * repeated .Procedure procedure = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index); + /** + * repeated .Procedure procedure = 2; + */ + int getProcedureCount(); + /** + * repeated .Procedure procedure = 2; + */ + java.util.List + getProcedureOrBuilderList(); + /** + * repeated .Procedure procedure = 2; + */ + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index); + + // optional uint64 proc_id = 3; + /** + * optional uint64 proc_id = 3; + */ + boolean hasProcId(); + /** + * optional uint64 proc_id = 3; + */ + long getProcId(); + } + /** + * Protobuf type {@code ProcedureWALEntry} + */ + public static final class ProcedureWALEntry extends + com.google.protobuf.GeneratedMessage + implements ProcedureWALEntryOrBuilder { + // Use ProcedureWALEntry.newBuilder() to construct. 
+ private ProcedureWALEntry(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); + } + private ProcedureWALEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final ProcedureWALEntry defaultInstance; + public static ProcedureWALEntry getDefaultInstance() { + return defaultInstance; + } + + public ProcedureWALEntry getDefaultInstanceForType() { + return defaultInstance; + } + + private final com.google.protobuf.UnknownFieldSet unknownFields; + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private ProcedureWALEntry( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + initFields(); + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } + case 8: { + int rawValue = input.readEnum(); + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type value = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.valueOf(rawValue); + if (value == null) { + unknownFields.mergeVarintField(1, rawValue); + } else { + bitField0_ |= 0x00000001; + type_ = value; + } + break; + } + case 18: { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000002; + } + procedure_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry)); + break; + } + case 24: { + bitField0_ |= 0x00000002; + procId_ = input.readUInt64(); + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException( + e.getMessage()).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Builder.class); + } + + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public ProcedureWALEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + 
return new ProcedureWALEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + /** + * Protobuf enum {@code ProcedureWALEntry.Type} + */ + public enum Type + implements com.google.protobuf.ProtocolMessageEnum { + /** + * EOF = 1; + */ + EOF(0, 1), + /** + * INIT = 2; + */ + INIT(1, 2), + /** + * INSERT = 3; + */ + INSERT(2, 3), + /** + * UPDATE = 4; + */ + UPDATE(3, 4), + /** + * DELETE = 5; + */ + DELETE(4, 5), + /** + * COMPACT = 6; + */ + COMPACT(5, 6), + ; + + /** + * EOF = 1; + */ + public static final int EOF_VALUE = 1; + /** + * INIT = 2; + */ + public static final int INIT_VALUE = 2; + /** + * INSERT = 3; + */ + public static final int INSERT_VALUE = 3; + /** + * UPDATE = 4; + */ + public static final int UPDATE_VALUE = 4; + /** + * DELETE = 5; + */ + public static final int DELETE_VALUE = 5; + /** + * COMPACT = 6; + */ + public static final int COMPACT_VALUE = 6; + + + public final int getNumber() { return value; } + + public static Type valueOf(int value) { + switch (value) { + case 1: return EOF; + case 2: return INIT; + case 3: return INSERT; + case 4: return UPDATE; + case 5: return DELETE; + case 6: return COMPACT; + default: return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public Type findValueByNumber(int number) { + return Type.valueOf(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor + getValueDescriptor() { + return getDescriptor().getValues().get(index); + } + public final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptorForType() { + return getDescriptor(); + } + public static final com.google.protobuf.Descriptors.EnumDescriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.getDescriptor().getEnumTypes().get(0); + } + + private static final Type[] VALUES = values(); + + public static Type valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException( + "EnumValueDescriptor is not for this type."); + } + return VALUES[desc.getIndex()]; + } + + private final int index; + private final int value; + + private Type(int index, int value) { + this.index = index; + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:ProcedureWALEntry.Type) + } + + private int bitField0_; + // required .ProcedureWALEntry.Type type = 1; + public static final int TYPE_FIELD_NUMBER = 1; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type type_; + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type getType() { + return type_; + } + + // repeated .Procedure procedure = 2; + public static final int PROCEDURE_FIELD_NUMBER = 2; + private java.util.List procedure_; + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List getProcedureList() { + return procedure_; + } + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List + getProcedureOrBuilderList() { + return 
procedure_; + } + /** + * repeated .Procedure procedure = 2; + */ + public int getProcedureCount() { + return procedure_.size(); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + return procedure_.get(index); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + return procedure_.get(index); + } + + // optional uint64 proc_id = 3; + public static final int PROC_ID_FIELD_NUMBER = 3; + private long procId_; + /** + * optional uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000002) == 0x00000002); + } + /** + * optional uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + + private void initFields() { + type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + procedure_ = java.util.Collections.emptyList(); + procId_ = 0L; + } + private byte memoizedIsInitialized = -1; + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized != -1) return isInitialized == 1; + + if (!hasType()) { + memoizedIsInitialized = 0; + return false; + } + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + memoizedIsInitialized = 0; + return false; + } + } + memoizedIsInitialized = 1; + return true; + } + + public void writeTo(com.google.protobuf.CodedOutputStream output) + throws java.io.IOException { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, type_.getNumber()); + } + for (int i = 0; i < procedure_.size(); i++) { + output.writeMessage(2, procedure_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeUInt64(3, procId_); + } + getUnknownFields().writeTo(output); + } + + private int memoizedSerializedSize = -1; + public int getSerializedSize() { + int size = memoizedSerializedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeEnumSize(1, type_.getNumber()); + } + for (int i = 0; i < procedure_.size(); i++) { + size += com.google.protobuf.CodedOutputStream + .computeMessageSize(2, procedure_.get(i)); + } + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeUInt64Size(3, procId_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; + return size; + } + + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry)) { + return super.equals(obj); + } + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry other = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry) obj; + + boolean result = true; + result = result && (hasType() == other.hasType()); + if (hasType()) { + result = result && + (getType() == other.getType()); + } + result = result && getProcedureList() + .equals(other.getProcedureList()); + result = result && (hasProcId() == 
other.hasProcId()); + if (hasProcId()) { + result = result && (getProcId() + == other.getProcId()); + } + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; + } + + private int memoizedHashCode = 0; + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptorForType().hashCode(); + if (hasType()) { + hash = (37 * hash) + TYPE_FIELD_NUMBER; + hash = (53 * hash) + hashEnum(getType()); + } + if (getProcedureCount() > 0) { + hash = (37 * hash) + PROCEDURE_FIELD_NUMBER; + hash = (53 * hash) + getProcedureList().hashCode(); + } + if (hasProcId()) { + hash = (37 * hash) + PROC_ID_FIELD_NUMBER; + hash = (53 * hash) + hashLong(getProcId()); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + byte[] data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseDelimitedFrom(java.io.InputStream input) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseDelimitedFrom( + java.io.InputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseDelimitedFrom(input, extensionRegistry); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.CodedInputStream input) + throws java.io.IOException { + return PARSER.parseFrom(input); + } + public static org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return PARSER.parseFrom(input, extensionRegistry); + } + + public static Builder newBuilder() { return 
Builder.create(); } + public Builder newBuilderForType() { return newBuilder(); } + public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry prototype) { + return newBuilder().mergeFrom(prototype); + } + public Builder toBuilder() { return newBuilder(this); } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * Protobuf type {@code ProcedureWALEntry} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntryOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor + getDescriptor() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_descriptor; + } + + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_fieldAccessorTable + .ensureFieldAccessorsInitialized( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.class, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Builder.class); + } + + // Construct using org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder( + com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + getProcedureFieldBuilder(); + } + } + private static Builder create() { + return new Builder(); + } + + public Builder clear() { + super.clear(); + type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + bitField0_ = (bitField0_ & ~0x00000001); + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + } else { + procedureBuilder_.clear(); + } + procId_ = 0L; + bitField0_ = (bitField0_ & ~0x00000004); + return this; + } + + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + + public com.google.protobuf.Descriptors.Descriptor + getDescriptorForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.internal_static_ProcedureWALEntry_descriptor; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry getDefaultInstanceForType() { + return org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.getDefaultInstance(); + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry build() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry buildPartial() { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry result = new org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry(this); + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if 
(((from_bitField0_ & 0x00000001) == 0x00000001)) { + to_bitField0_ |= 0x00000001; + } + result.type_ = type_; + if (procedureBuilder_ == null) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = java.util.Collections.unmodifiableList(procedure_); + bitField0_ = (bitField0_ & ~0x00000002); + } + result.procedure_ = procedure_; + } else { + result.procedure_ = procedureBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { + to_bitField0_ |= 0x00000002; + } + result.procId_ = procId_; + result.bitField0_ = to_bitField0_; + onBuilt(); + return result; + } + + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry) { + return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry)other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry other) { + if (other == org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.getDefaultInstance()) return this; + if (other.hasType()) { + setType(other.getType()); + } + if (procedureBuilder_ == null) { + if (!other.procedure_.isEmpty()) { + if (procedure_.isEmpty()) { + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000002); + } else { + ensureProcedureIsMutable(); + procedure_.addAll(other.procedure_); + } + onChanged(); + } + } else { + if (!other.procedure_.isEmpty()) { + if (procedureBuilder_.isEmpty()) { + procedureBuilder_.dispose(); + procedureBuilder_ = null; + procedure_ = other.procedure_; + bitField0_ = (bitField0_ & ~0x00000002); + procedureBuilder_ = + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
+ getProcedureFieldBuilder() : null; + } else { + procedureBuilder_.addAllMessages(other.procedure_); + } + } + } + if (other.hasProcId()) { + setProcId(other.getProcId()); + } + this.mergeUnknownFields(other.getUnknownFields()); + return this; + } + + public final boolean isInitialized() { + if (!hasType()) { + + return false; + } + for (int i = 0; i < getProcedureCount(); i++) { + if (!getProcedure(i).isInitialized()) { + + return false; + } + } + return true; + } + + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry parsedMessage = null; + try { + parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry) e.getUnfinishedMessage(); + throw e; + } finally { + if (parsedMessage != null) { + mergeFrom(parsedMessage); + } + } + return this; + } + private int bitField0_; + + // required .ProcedureWALEntry.Type type = 1; + private org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public boolean hasType() { + return ((bitField0_ & 0x00000001) == 0x00000001); + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type getType() { + return type_; + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public Builder setType(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + type_ = value; + onChanged(); + return this; + } + /** + * required .ProcedureWALEntry.Type type = 1; + */ + public Builder clearType() { + bitField0_ = (bitField0_ & ~0x00000001); + type_ = org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry.Type.EOF; + onChanged(); + return this; + } + + // repeated .Procedure procedure = 2; + private java.util.List procedure_ = + java.util.Collections.emptyList(); + private void ensureProcedureIsMutable() { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { + procedure_ = new java.util.ArrayList(procedure_); + bitField0_ |= 0x00000002; + } + } + + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_; + + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List getProcedureList() { + if (procedureBuilder_ == null) { + return java.util.Collections.unmodifiableList(procedure_); + } else { + return procedureBuilder_.getMessageList(); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public int getProcedureCount() { + if (procedureBuilder_ == null) { + return procedure_.size(); + } else { + return procedureBuilder_.getCount(); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure getProcedure(int index) { + if (procedureBuilder_ == null) { + return 
procedure_.get(index); + } else { + return procedureBuilder_.getMessage(index); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.set(index, value); + onChanged(); + } else { + procedureBuilder_.setMessage(index, value); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder setProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.set(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.setMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure(org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(value); + onChanged(); + } else { + procedureBuilder_.addMessage(value); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure value) { + if (procedureBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + ensureProcedureIsMutable(); + procedure_.add(index, value); + onChanged(); + } else { + procedureBuilder_.addMessage(index, value); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(builderForValue.build()); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addProcedure( + int index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.add(index, builderForValue.build()); + onChanged(); + } else { + procedureBuilder_.addMessage(index, builderForValue.build()); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder addAllProcedure( + java.lang.Iterable values) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + super.addAll(values, procedure_); + onChanged(); + } else { + procedureBuilder_.addAllMessages(values); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder clearProcedure() { + if (procedureBuilder_ == null) { + procedure_ = java.util.Collections.emptyList(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + } else { + procedureBuilder_.clear(); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public Builder removeProcedure(int index) { + if (procedureBuilder_ == null) { + ensureProcedureIsMutable(); + procedure_.remove(index); + onChanged(); + } else { + procedureBuilder_.remove(index); + } + return this; + } + /** + * repeated .Procedure procedure = 2; + */ + public 
org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder( + int index) { + return getProcedureFieldBuilder().getBuilder(index); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder( + int index) { + if (procedureBuilder_ == null) { + return procedure_.get(index); } else { + return procedureBuilder_.getMessageOrBuilder(index); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List + getProcedureOrBuilderList() { + if (procedureBuilder_ != null) { + return procedureBuilder_.getMessageOrBuilderList(); + } else { + return java.util.Collections.unmodifiableList(procedure_); + } + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder() { + return getProcedureFieldBuilder().addBuilder( + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + } + /** + * repeated .Procedure procedure = 2; + */ + public org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder addProcedureBuilder( + int index) { + return getProcedureFieldBuilder().addBuilder( + index, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()); + } + /** + * repeated .Procedure procedure = 2; + */ + public java.util.List + getProcedureBuilderList() { + return getProcedureFieldBuilder().getBuilderList(); + } + private com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> + getProcedureFieldBuilder() { + if (procedureBuilder_ == null) { + procedureBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< + org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>( + procedure_, + ((bitField0_ & 0x00000002) == 0x00000002), + getParentForChildren(), + isClean()); + procedure_ = null; + } + return procedureBuilder_; + } + + // optional uint64 proc_id = 3; + private long procId_ ; + /** + * optional uint64 proc_id = 3; + */ + public boolean hasProcId() { + return ((bitField0_ & 0x00000004) == 0x00000004); + } + /** + * optional uint64 proc_id = 3; + */ + public long getProcId() { + return procId_; + } + /** + * optional uint64 proc_id = 3; + */ + public Builder setProcId(long value) { + bitField0_ |= 0x00000004; + procId_ = value; + onChanged(); + return this; + } + /** + * optional uint64 proc_id = 3; + */ + public Builder clearProcId() { + bitField0_ = (bitField0_ & ~0x00000004); + procId_ = 0L; + onChanged(); + return this; + } + + // @@protoc_insertion_point(builder_scope:ProcedureWALEntry) + } + + static { + defaultInstance = new ProcedureWALEntry(true); + defaultInstance.initFields(); + } + + // @@protoc_insertion_point(class_scope:ProcedureWALEntry) + } + + private static com.google.protobuf.Descriptors.Descriptor + internal_static_Procedure_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_Procedure_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_SequentialProcedureData_descriptor; + 
private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_SequentialProcedureData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_StateMachineProcedureData_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_StateMachineProcedureData_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureWALHeader_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureWALHeader_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureWALTrailer_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureWALTrailer_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureStoreTracker_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureStoreTracker_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureStoreTracker_TrackerNode_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable; + private static com.google.protobuf.Descriptors.Descriptor + internal_static_ProcedureWALEntry_descriptor; + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_ProcedureWALEntry_fieldAccessorTable; + + public static com.google.protobuf.Descriptors.FileDescriptor + getDescriptor() { + return descriptor; + } + private static com.google.protobuf.Descriptors.FileDescriptor + descriptor; + static { + java.lang.String[] descriptorData = { + "\n\017Procedure.proto\032\023ErrorHandling.proto\"\217" + + "\002\n\tProcedure\022\022\n\nclass_name\030\001 \002(\t\022\021\n\tpare" + + "nt_id\030\002 \001(\004\022\017\n\007proc_id\030\003 \002(\004\022\022\n\nstart_ti" + + "me\030\004 \002(\004\022\r\n\005owner\030\005 \001(\t\022\036\n\005state\030\006 \002(\0162\017" + + ".ProcedureState\022\020\n\010stack_id\030\007 \003(\r\022\023\n\013las" + + "t_update\030\010 \002(\004\022\017\n\007timeout\030\t \001(\r\022+\n\texcep" + + "tion\030\n \001(\0132\030.ForeignExceptionMessage\022\016\n\006" + + "result\030\013 \001(\014\022\022\n\nstate_data\030\014 \001(\014\"+\n\027Sequ" + + "entialProcedureData\022\020\n\010executed\030\001 \002(\010\"*\n" + + "\031StateMachineProcedureData\022\r\n\005state\030\001 \003(", + "\r\"X\n\022ProcedureWALHeader\022\017\n\007version\030\001 \002(\r" + + "\022\014\n\004type\030\002 \002(\r\022\016\n\006log_id\030\003 \002(\004\022\023\n\013min_pr" + + "oc_id\030\004 \002(\004\";\n\023ProcedureWALTrailer\022\017\n\007ve" + + "rsion\030\001 \002(\r\022\023\n\013tracker_pos\030\002 \002(\004\"\214\001\n\025Pro" + + "cedureStoreTracker\0220\n\004node\030\001 \003(\0132\".Proce" + + "dureStoreTracker.TrackerNode\032A\n\013TrackerN" + + "ode\022\020\n\010start_id\030\001 \002(\004\022\017\n\007updated\030\002 \003(\004\022\017" + + "\n\007deleted\030\003 \003(\004\"\266\001\n\021ProcedureWALEntry\022%\n" + + "\004type\030\001 \002(\0162\027.ProcedureWALEntry.Type\022\035\n\t" + + "procedure\030\002 \003(\0132\n.Procedure\022\017\n\007proc_id\030\003", + " \001(\004\"J\n\004Type\022\007\n\003EOF\020\001\022\010\n\004INIT\020\002\022\n\n\006INSER" + + 
"T\020\003\022\n\n\006UPDATE\020\004\022\n\n\006DELETE\020\005\022\013\n\007COMPACT\020\006" + + "*p\n\016ProcedureState\022\020\n\014INITIALIZING\020\001\022\014\n\010" + + "RUNNABLE\020\002\022\013\n\007WAITING\020\003\022\023\n\017WAITING_TIMEO" + + "UT\020\004\022\016\n\nROLLEDBACK\020\005\022\014\n\010FINISHED\020\006BE\n*or" + + "g.apache.hadoop.hbase.protobuf.generated" + + "B\017ProcedureProtosH\001\210\001\001\240\001\001" + }; + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_Procedure_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_Procedure_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Procedure_descriptor, + new java.lang.String[] { "ClassName", "ParentId", "ProcId", "StartTime", "Owner", "State", "StackId", "LastUpdate", "Timeout", "Exception", "Result", "StateData", }); + internal_static_SequentialProcedureData_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_SequentialProcedureData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SequentialProcedureData_descriptor, + new java.lang.String[] { "Executed", }); + internal_static_StateMachineProcedureData_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_StateMachineProcedureData_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_StateMachineProcedureData_descriptor, + new java.lang.String[] { "State", }); + internal_static_ProcedureWALHeader_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_ProcedureWALHeader_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureWALHeader_descriptor, + new java.lang.String[] { "Version", "Type", "LogId", "MinProcId", }); + internal_static_ProcedureWALTrailer_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_ProcedureWALTrailer_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureWALTrailer_descriptor, + new java.lang.String[] { "Version", "TrackerPos", }); + internal_static_ProcedureStoreTracker_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_ProcedureStoreTracker_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureStoreTracker_descriptor, + new java.lang.String[] { "Node", }); + internal_static_ProcedureStoreTracker_TrackerNode_descriptor = + internal_static_ProcedureStoreTracker_descriptor.getNestedTypes().get(0); + internal_static_ProcedureStoreTracker_TrackerNode_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureStoreTracker_TrackerNode_descriptor, + new java.lang.String[] { "StartId", "Updated", "Deleted", }); + internal_static_ProcedureWALEntry_descriptor = + getDescriptor().getMessageTypes().get(6); + internal_static_ProcedureWALEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_ProcedureWALEntry_descriptor, + new java.lang.String[] { "Type", "Procedure", "ProcId", }); + return null; + } + }; + 
com.google.protobuf.Descriptors.FileDescriptor
+ .internalBuildGeneratedFileFrom(descriptorData,
+ new com.google.protobuf.Descriptors.FileDescriptor[] {
+ org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos.getDescriptor(),
+ }, assigner);
+ }
+
+ // @@protoc_insertion_point(outer_class_scope)
+}
diff --git a/hbase-protocol/src/main/protobuf/Procedure.proto b/hbase-protocol/src/main/protobuf/Procedure.proto
new file mode 100644
index 0000000..232c290
--- /dev/null
+++ b/hbase-protocol/src/main/protobuf/Procedure.proto
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "ProcedureProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "ErrorHandling.proto";
+
+enum ProcedureState {
+ INITIALIZING = 1; // Procedure in construction, not yet added to the executor
+ RUNNABLE = 2; // Procedure added to the executor, and ready to be executed
+ WAITING = 3; // The procedure is waiting on children to be completed
+ WAITING_TIMEOUT = 4; // The procedure is waiting on a timeout or an external event
+ ROLLEDBACK = 5; // The procedure failed and was rolled back
+ FINISHED = 6; // The procedure execution is completed. May need a rollback if it failed.
+}
+
+/**
+ * Procedure metadata, serialized by the ProcedureStore to be able to recover the old state.
+ */ +message Procedure { + // internal "static" state + required string class_name = 1; // full classname to be able to instantiate the procedure + optional uint64 parent_id = 2; // parent if not a root-procedure otherwise not set + required uint64 proc_id = 3; + required uint64 start_time = 4; + optional string owner = 5; + + // internal "runtime" state + required ProcedureState state = 6; + repeated uint32 stack_id = 7; // stack indices in case the procedure was running + required uint64 last_update = 8; + optional uint32 timeout = 9; + + // user state/results + optional ForeignExceptionMessage exception = 10; + optional bytes result = 11; // opaque (user) result structure + optional bytes state_data = 12; // opaque (user) procedure internal-state +} + +/** + * SequentialProcedure data + */ +message SequentialProcedureData { + required bool executed = 1; +} + +/** + * StateMachineProcedure data + */ +message StateMachineProcedureData { + repeated uint32 state = 1; +} + +/** + * Procedure WAL header + */ +message ProcedureWALHeader { + required uint32 version = 1; + required uint32 type = 2; + required uint64 log_id = 3; + required uint64 min_proc_id = 4; +} + +/** + * Procedure WAL trailer + */ +message ProcedureWALTrailer { + required uint32 version = 1; + required uint64 tracker_pos = 2; +} + +message ProcedureStoreTracker { + message TrackerNode { + required uint64 start_id = 1; + repeated uint64 updated = 2; + repeated uint64 deleted = 3; + } + + repeated TrackerNode node = 1; +} + +message ProcedureWALEntry { + enum Type { + EOF = 1; + INIT = 2; + INSERT = 3; + UPDATE = 4; + DELETE = 5; + COMPACT = 6; + } + + required Type type = 1; + repeated Procedure procedure = 2; + optional uint64 proc_id = 3; +} diff --git a/pom.xml b/pom.xml index f7ae209..ce49c04 100644 --- a/pom.xml +++ b/pom.xml @@ -56,6 +56,7 @@ hbase-client hbase-hadoop-compat hbase-common + hbase-procedure hbase-it hbase-examples hbase-prefix-tree @@ -871,7 +872,7 @@ ${basedir}/target/asciidoc - + @@ -1014,7 +1015,7 @@ org.asciidoctor asciidoctor-maven-plugin - 1.5.2 + 1.5.2 false @@ -1034,10 +1035,10 @@ - output-html + output-html site - process-asciidoc + process-asciidoc @@ -1191,6 +1192,7 @@ Modules are pretty heavy-weight things, so doing this work isn't too bad. --> hbase-server-${project.version}-tests.jar hbase-common-${project.version}-tests.jar + hbase-procedure-${project.version}-tests.jar hbase-it-${project.version}-tests.jar hbase-annotations-${project.version}-tests.jar 2.18 @@ -1262,6 +1264,17 @@ org.apache.hbase + hbase-procedure + ${project.version} + + + org.apache.hbase + hbase-procedure + ${project.version} + test-jar + + + org.apache.hbase hbase-hadoop-compat ${project.version}
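As a rough usage sketch (not part of the patch itself): the messages declared in Procedure.proto can be driven through the generated builders shown above. Setter names such as setClassName or setStartTime follow the standard protobuf-java naming derived from the declared fields, and the procedure class name used below is a hypothetical placeholder.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.Procedure;
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureState;
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureWALEntry;

public class ProcedureWALEntrySketch {
  public static void main(String[] args) throws Exception {
    long now = System.currentTimeMillis();

    // Populate the required Procedure fields (class_name, proc_id, start_time,
    // state, last_update); the remaining fields are optional per the .proto.
    Procedure proc = Procedure.newBuilder()
        .setClassName("org.example.MyProcedure")  // hypothetical procedure implementation
        .setProcId(1L)
        .setStartTime(now)
        .setState(ProcedureState.RUNNABLE)
        .setLastUpdate(now)
        .build();

    // An INSERT entry carries the procedure(s) being added to the store.
    ProcedureWALEntry entry = ProcedureWALEntry.newBuilder()
        .setType(ProcedureWALEntry.Type.INSERT)
        .addProcedure(proc)
        .build();

    // Round-trip through the delimited wire format, as a WAL writer/reader might.
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    entry.writeDelimitedTo(out);
    ProcedureWALEntry parsed =
        ProcedureWALEntry.parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));

    System.out.println(parsed.getType() + " proc_id=" + parsed.getProcedure(0).getProcId());
  }
}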