diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index b85f6ed..ae627ba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -506,7 +506,7 @@ class AsyncProcess { Runnable runnable = Trace.wrap("AsyncProcess.sendMultiAction", new Runnable() { @Override public void run() { - MultiResponse res; + MultiResponse res = null; try { MultiServerCallable callable = createCallable(loc, multiAction); try { @@ -517,8 +517,11 @@ class AsyncProcess { ", resubmitting all since not sure where we are at", e); resubmitAll(initialActions, multiAction, loc, numAttempt + 1, e, errorsByServer); return; + } catch (RuntimeException e) { + // Add log of exception here. Caller suppresses RuntimeExceptions. + LOG.error(e); + throw e; } - receiveMultiAction(initialActions, multiAction, loc, res, numAttempt, errorsByServer); } finally { decTaskCounters(multiAction.getRegions(), loc.getServerName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java index a17be55..9edff7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSmallScanner.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -29,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController; @@ -133,13 +130,13 @@ public class ClientSmallScanner extends ClientScanner { || checkScanStopRow(endKey) || done) { close(); if (LOG.isDebugEnabled()) { - LOG.debug("Finished with small scan at " + this.currentRegion); + LOG.debug("Finished scan of " + this.currentRegion); } return false; } localStartKey = endKey; if (LOG.isDebugEnabled()) { - LOG.debug("Finished with region " + this.currentRegion); + LOG.debug("Finished with " + this.currentRegion); } } else if (this.lastResult != null) { localStartKey = this.lastResult.getRow(); @@ -150,8 +147,7 @@ public class ClientSmallScanner extends ClientScanner { } if (LOG.isTraceEnabled()) { - LOG.trace("Advancing internal small scanner to startKey at '" - + Bytes.toStringBinary(localStartKey) + "'"); + LOG.trace("Advancing to startKey at '" + Bytes.toStringBinary(localStartKey) + "'"); } smallScanCallable = getSmallScanCallable(localStartKey, cacheNum); if (this.scanMetrics != null && skipRowOfFirstResult == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index 4d668bc..0ca1a92 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -1205,9 +1205,9 @@ public class HConnectionManager { 
ServerName serverName = HRegionInfo.getServerName(regionInfoRow); if (serverName == null) { - throw new NoServerForRegionException("No server address listed " + - "in " + parentTable + " for region " + - regionInfo.getRegionNameAsString() + " containing row " + + throw new NoServerForRegionException("No serverName " + + "in " + parentTable + " for " + + regionInfo.getRegionNameAsString() + " containing " + Bytes.toStringBinary(row)); } @@ -1233,12 +1233,10 @@ public class HConnectionManager { } if (tries < numTries - 1) { if (LOG.isDebugEnabled()) { - LOG.debug("locateRegionInMeta parentTable=" + - parentTable + ", metaLocation=" + - ((metaLocation == null)? "null": "{" + metaLocation + "}") + - ", attempt=" + tries + " of " + - this.numTries + " failed; retrying after sleep of " + - ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage()); + LOG.debug("locateRegionInMeta failed; parentTable=" + parentTable + + ", metaLocation=" + ((metaLocation == null)? "null": "{" + metaLocation + "}") + + ", attempt=" + tries + "/" + this.numTries + "; retrying after=" + + ConnectionUtils.getPauseTime(this.pause, tries) + "ms; because: " + e.getMessage()); } } else { throw e; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index 974af7b..21447e0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -911,6 +911,7 @@ public class HTable implements HTableInterface { */ private void backgroundFlushCommits(boolean synchronous) throws InterruptedIOException, RetriesExhaustedWithDetailsException { + if (this.writeAsyncBuffer.isEmpty()) return; try { do { @@ -1229,7 +1230,7 @@ public class HTable implements HTableInterface { return; } flushCommits(); - if (cleanupPoolOnClose) { + if (cleanupPoolOnClose && this.pool != null) { this.pool.shutdown(); } if (cleanupConnectionOnClose) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index ae8e9c2..7995d94 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -80,13 +80,14 @@ class MultiServerCallable extends RegionServerCallable { } else { regionActionBuilder = RequestConverter.buildRegionAction(regionName, actions); } - multiRequestBuilder.addRegionAction(regionActionBuilder.build()); + RegionAction ra = regionActionBuilder.build(); + multiRequestBuilder.addRegionAction(ra); } // Controller optionally carries cell data over the proxy/service boundary and also // optionally ferries cell response data back out again. 
PayloadCarryingRpcController controller = new PayloadCarryingRpcController(cells); controller.setPriority(getTableName()); - ClientProtos.MultiResponse responseProto; + ClientProtos.MultiResponse responseProto = null; ClientProtos.MultiRequest requestProto = multiRequestBuilder.build(); try { responseProto = getStub().multi(controller, requestProto); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index d19e391..c2e27c5 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -22,30 +22,54 @@ import static org.junit.Assert.fail; import java.io.IOException; import java.net.SocketTimeoutException; +import java.util.Comparator; +import java.util.Map; +import java.util.SortedMap; +import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.SmallTests; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.protobuf.generated.CellProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest; +import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.hbase.util.Threads; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; import org.junit.Before; import org.junit.Ignore; import org.junit.Test; import org.junit.experimental.categories.Category; import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import com.google.protobuf.ByteString; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -54,9 +78,11 @@ import com.google.protobuf.ServiceException; * Mock up cluster emissions. 
*/ @Category(SmallTests.class) -public class TestClientNoCluster { +public class TestClientNoCluster extends Configured implements Tool { private static final Log LOG = LogFactory.getLog(TestClientNoCluster.class); private Configuration conf; + public static final ServerName META_SERVERNAME = + new ServerName("meta.example.org", 60010, 12345); @Before public void setUp() throws Exception { @@ -71,7 +97,7 @@ public class TestClientNoCluster { * Simple cluster registry inserted in place of our usual zookeeper based one. */ static class SimpleRegistry implements Registry { - final ServerName META_HOST = new ServerName("10.10.10.10", 60010, 12345); + final ServerName META_HOST = META_SERVERNAME; @Override public void init(HConnection connection) { @@ -301,4 +327,345 @@ public class TestClientNoCluster { return this.stub; } } + + /** + * Fake many regionservers and many regions on a connection implementation. + */ + static class ManyServersManyRegionsConnection + extends HConnectionManager.HConnectionImplementation { + final ClientService.BlockingInterface stub = + Mockito.mock(ClientService.BlockingInterface.class); + /** + * Map of faked-up rows of a 'meta table'. + */ + final SortedMap<byte [], Pair<HRegionInfo, ServerName>> meta; + final AtomicLong sequenceids = new AtomicLong(0); + + ManyServersManyRegionsConnection(Configuration conf, boolean managed, + ExecutorService pool, User user) + throws IOException { + super(conf, managed, pool, user); + this.meta = makeMeta(Bytes.toBytes( + conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE))), + conf.getInt("hbase.test.regions", 100), + conf.getLong("hbase.test.namespace.span", 1000), + conf.getInt("hbase.test.servers", 10)); + try { + setupMocking(); + } catch (ServiceException e) { + throw new IOException("Failed setting up mocks", e); + } + } + + /** + * Set up the mocks for get, scans, and puts. + * @throws ServiceException + */ + private void setupMocking() + throws ServiceException { + // Mock put function. Inbound it is a MultiRequest. Response with MultiResponse. + Mockito.when(this.stub.multi((RpcController)Mockito.any(), (MultiRequest)Mockito.any())). + thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + // There are two arguments. Ignore the first one which is rpccontroller. + Object [] args = invocation.getArguments(); + MultiRequest request = (MultiRequest)args[1]; + // Make a response to match the request. Act like there were no failures. + ClientProtos.MultiResponse.Builder builder = ClientProtos.MultiResponse.newBuilder(); + // Per Region. + LOG.info("Count of Regions " + request.getRegionActionCount()); + for (RegionAction regionAction: request.getRegionActionList()) { + RegionActionResult.Builder regionActionResultBuilder = + RegionActionResult.newBuilder(); + // Per Action in a Region. + for (ClientProtos.Action action: regionAction.getActionList()) { + ResultOrException.Builder roeBuilder = ResultOrException.newBuilder(); + // Return empty Result and proper index as result. + roeBuilder.setResult(ClientProtos.Result.getDefaultInstance()); + roeBuilder.setIndex(action.getIndex()); + regionActionResultBuilder.addResultOrException(roeBuilder.build()); + } + builder.addRegionActionResult(regionActionResultBuilder.build()); + } + Threads.sleep(10000); + return builder.build(); + } + }); + + // Mock scan function. + Mockito.when(this.stub.scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any())).
+ thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + // There are two arguments. Ignore the first one which is rpccontroller. + Object [] args = invocation.getArguments(); + ScanRequest request = (ScanRequest)args[1]; + // Presume it is a scan of meta for now. Not all scans provide a region spec expecting + // the server to keep reference by scannerid. TODO. + return doMetaScanResponse(request); + } + + ScanResponse doMetaScanResponse(final ScanRequest request) throws IOException { + ScanResponse.Builder builder = ScanResponse.newBuilder(); + int max = request.getNumberOfRows(); + int count = 0; + Map<byte [], Pair<HRegionInfo, ServerName>> tail = + request.hasScan()? meta.tailMap(request.getScan().getStartRow().toByteArray()): meta; + for (Map.Entry<byte [], Pair<HRegionInfo, ServerName>> e: tail.entrySet()) { + // Can be 0 on open of a scanner -- i.e. rpc to setup scannerid only. + if (max <= 0) break; + if (++count > max) break; + HRegionInfo hri = e.getValue().getFirst(); + ByteString row = ByteString.copyFrom(hri.getRegionName()); + ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); + resultBuilder.addCell(getRegionInfo(row, hri)); + resultBuilder.addCell(getServer(row, e.getValue().getSecond())); + resultBuilder.addCell(getStartCode(row)); + builder.addResults(resultBuilder.build()); + // Set more to false if we are on the last region in table. + if (hri.getEndKey().length <= 0) builder.setMoreResults(false); + else builder.setMoreResults(true); + } + // If no scannerid, set one. + builder.setScannerId(request.hasScannerId()? + request.getScannerId(): sequenceids.incrementAndGet()); + return builder.build(); + } + }); + + // Mock the Get function. + Mockito.when(this.stub.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())). + thenAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invocation) throws Throwable { + Object [] args = invocation.getArguments(); + // There are two arguments. Ignore the first one which is rpccontroller. + GetRequest request = (GetRequest)args[1]; + boolean meta = isMetaRegion(request.getRegion().getValue().toByteArray(), + request.getRegion().getType()); + if (!meta) throw new UnsupportedOperationException(); + return doMetaGetResponse(request); + } + + GetResponse doMetaGetResponse(final GetRequest request) throws IOException { + ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); + ByteString row = request.getGet().getRow(); + Pair<HRegionInfo, ServerName> p = meta.get(row.toByteArray()); + if (p == null) { + if (request.getGet().getClosestRowBefore()) { + byte [] bytes = row.toByteArray(); + SortedMap<byte [], Pair<HRegionInfo, ServerName>> head = + bytes != null? meta.headMap(bytes): meta; + p = head == null? null: head.get(head.lastKey()); + } + } + if (p != null) { + resultBuilder.addCell(getRegionInfo(row, p.getFirst())); + resultBuilder.addCell(getServer(row, p.getSecond())); + } + resultBuilder.addCell(getStartCode(row)); + GetResponse.Builder builder = GetResponse.newBuilder(); + builder.setResult(resultBuilder.build()); + return builder.build(); + } + }); + } + + @Override + public BlockingInterface getClient(ServerName sn) throws IOException { + return this.stub; + } + } + + /** + * @param name region name or encoded region name. + * @param type + * @return True if we are dealing with a hbase:meta region.
+ */ + static boolean isMetaRegion(final byte [] name, final RegionSpecifierType type) { + switch (type) { + case REGION_NAME: + return Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), name); + case ENCODED_REGION_NAME: + return Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), name); + default: throw new UnsupportedOperationException(); + } + } + + static CellProtos.Cell.Builder getBaseCellBuilder(final ByteString row) { + CellProtos.Cell.Builder cellBuilder = CellProtos.Cell.newBuilder(); + cellBuilder.setRow(row); + cellBuilder.setFamily(ByteString.copyFrom(HConstants.CATALOG_FAMILY)); + cellBuilder.setTimestamp(System.currentTimeMillis()); + return cellBuilder; + } + + static CellProtos.Cell getRegionInfo(final ByteString row, final HRegionInfo hri) + throws IOException { + CellProtos.Cell.Builder cellBuilder = getBaseCellBuilder(row); + cellBuilder.setQualifier(ByteString.copyFrom(HConstants.REGIONINFO_QUALIFIER)); + cellBuilder.setValue(ByteString.copyFrom(hri.toByteArray())); + return cellBuilder.build(); + } + + static CellProtos.Cell getServer(final ByteString row, final ServerName sn) { + CellProtos.Cell.Builder cellBuilder = getBaseCellBuilder(row); + cellBuilder.setQualifier(ByteString.copyFrom(HConstants.SERVER_QUALIFIER)); + cellBuilder.setValue(ByteString.copyFrom(Bytes. + toBytes(sn.getHostAndPort()))); + return cellBuilder.build(); + } + + static CellProtos.Cell getStartCode(final ByteString row) { + CellProtos.Cell.Builder cellBuilder = getBaseCellBuilder(row); + cellBuilder.setQualifier(ByteString.copyFrom(HConstants.STARTCODE_QUALIFIER)); + // TODO: + cellBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(META_SERVERNAME.getStartcode()))); + return cellBuilder.build(); + } + + private static final byte [] BIG_USER_TABLE = Bytes.toBytes("t"); + + /* + * Format passed integer. Zero-pad. + * Copied from hbase-server PE class and small amendment. Make them share. + * @param number + * @return Returns zero-prefixed 10-byte wide decimal version of passed + * number (Does absolute in case number is negative). + */ + static byte [] format(final long number) { + byte [] b = new byte[10]; + long d = number; + for (int i = b.length - 1; i >= 0; i--) { + b[i] = (byte)((d % 10) + '0'); + d /= 10; + } + return b; + } + + /** + * @param count + * @param namespaceSpan + * @return count regions + */ + private static HRegionInfo [] makeHRegionInfos(final byte [] tableName, final int count, + final long namespaceSpan) { + byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + long interval = namespaceSpan / count; + HRegionInfo [] hris = new HRegionInfo[count]; + for (int i = 0; i < count; i++) { + if (i == 0) { + endKey = format(interval); + } else { + startKey = endKey; + if (i == count - 1) endKey = HConstants.EMPTY_BYTE_ARRAY; + else endKey = format((i + 1) * interval); + } + hris[i] = new HRegionInfo(TableName.valueOf(tableName), startKey, endKey); + } + return hris; + } + + /** + * @param count + * @return Return count servernames. + */ + private static ServerName [] makeServerNames(final int count) { + ServerName [] sns = new ServerName[count]; + for (int i = 0; i < count; i++) { + sns[i] = new ServerName("" + i + ".example.org", 60010, i); + } + return sns; + } + + /** + * Comparator for meta row keys. 
+ */ + static class MetaRowsComparator implements Comparator<byte []> { + private final KeyValue.KVComparator delegate = new KeyValue.MetaComparator(); + @Override + public int compare(byte[] left, byte[] right) { + return delegate.compareRows(left, 0, left.length, right, 0, right.length); + } + } + + /** + * Create a map that is keyed by meta row name and whose value is the HRegionInfo and + * ServerName to return for this row. + * @param hris + * @param serverNames + * @return Map with faked hbase:meta content in it. + */ + static SortedMap<byte [], Pair<HRegionInfo, ServerName>> makeMeta(final byte [] tableName, + final int regionCount, final long namespaceSpan, final int serverCount) { + // I need a comparator for meta rows so we sort properly. + SortedMap<byte [], Pair<HRegionInfo, ServerName>> meta = + new ConcurrentSkipListMap<byte [], Pair<HRegionInfo, ServerName>>(new MetaRowsComparator()); + HRegionInfo [] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan); + ServerName [] serverNames = makeServerNames(serverCount); + int per = regionCount / serverCount; + int count = 0; + for (HRegionInfo hri: hris) { + Pair<HRegionInfo, ServerName> p = + new Pair<HRegionInfo, ServerName>(hri, serverNames[count++ / per]); + meta.put(hri.getRegionName(), p); + } + return meta; + } + + @Override + public int run(String[] arg0) throws Exception { + int errCode = 0; + // TODO: Make command options + final int servers = 1000; + final int regions = 100000; + final long namespaceSpan = 1000000; + final long cycles = 10; + if ((namespaceSpan < regions) || (regions < servers)) { + throw new IllegalArgumentException("namespaceSpan=" + namespaceSpan + " must be > regions=" + + regions + " which must be > servers=" + servers); + } + // Set my many servers and many regions faking connection in place. + getConf().set("hbase.client.connection.impl", + ManyServersManyRegionsConnection.class.getName()); + // Use simple kv registry rather than zk + getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName()); + // Ugly but this is the only way to pass configs into the ManyServersManyRegionsConnection class. + getConf().setInt("hbase.test.regions", regions); + getConf().setLong("hbase.test.namespace.span", namespaceSpan); + getConf().setLong("hbase.test.servers", servers); + getConf().set("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE)); + // TODO: Do this in PE? + HTable table = new HTable(getConf(), BIG_USER_TABLE); + table.setAutoFlushTo(false); + try { + for (int j = 0; j < cycles; j++) { + for (int i = 0; i < namespaceSpan; i++) { + byte [] b = format(i); + Put p = new Put(b); + p.add(HConstants.CATALOG_FAMILY, b, b); + table.put(p); + } + } + } finally { + table.close(); + } + return errCode; + } + + public static void main(String[] args) throws Exception { + System.exit(ToolRunner.run(HBaseConfiguration.create(), new TestClientNoCluster(), args)); + } } \ No newline at end of file diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java index 1db0d8c..1ba72bb 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/catalog/MetaEditor.java @@ -557,4 +557,4 @@ public class MetaEditor { Bytes.toBytes(openSeqNum)); return p; } -} +} \ No newline at end of file
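Reviewer note (not part of the patch): a minimal sketch of how the faked connection above might be exercised without going through ToolRunner. It assumes a throwaway class placed in the org.apache.hadoop.hbase.client test package so the package-private test classes are visible; the FakedConnectionSketch name, the counts, and the sample row are illustrative only, while the hbase.test.* keys are the ones read by ManyServersManyRegionsConnection.

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.util.Bytes;

public class FakedConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Swap the faked connection and the simple registry in for a live cluster/zookeeper.
    conf.set("hbase.client.connection.impl",
        TestClientNoCluster.ManyServersManyRegionsConnection.class.getName());
    conf.set("hbase.client.registry.impl", TestClientNoCluster.SimpleRegistry.class.getName());
    conf.setInt("hbase.test.regions", 100);
    conf.setLong("hbase.test.namespace.span", 1000);
    conf.setInt("hbase.test.servers", 10);
    conf.set("hbase.test.tablename", "t");
    HTable table = new HTable(conf, Bytes.toBytes("t"));
    try {
      table.setAutoFlushTo(false);
      // Row key in the zero-padded form produced by TestClientNoCluster.format().
      byte [] row = Bytes.toBytes("0000000001");
      Put p = new Put(row);
      p.add(HConstants.CATALOG_FAMILY, row, row);
      table.put(p);
    } finally {
      table.close(); // flush pushes the buffered put through the mocked multi() above
    }
  }
}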