From 4e7c20e0bf954636fccd59ddf8c4ba2736f271d9 Mon Sep 17 00:00:00 2001 From: Viraj Jasani Date: Thu, 26 Sep 2019 00:13:59 +0530 Subject: [PATCH] HBASE-23015 : Moving from Jackson2 to shaded Gson (Backport HBASE-20587) --- hbase-client/pom.xml | 4 + .../apache/hadoop/hbase/util/JsonMapper.java | 11 +- .../hadoop/hbase/client/TestOperation.java | 23 +- hbase-common/pom.xml | 4 + .../apache/hadoop/hbase/util/GsonUtil.java | 60 +++ .../apache/hadoop/hbase/util/LongAdder.java | 230 +++++++++++ .../apache/hadoop/hbase/util/Striped64.java | 357 ++++++++++++++++++ .../main/resources/supplemental-models.xml | 19 + hbase-server/pom.xml | 4 + .../hadoop/hbase/http/jmx/JMXJsonServlet.java | 8 +- .../hadoop/hbase/io/hfile/AgeSnapshot.java | 5 +- .../hadoop/hbase/io/hfile/BlockCacheUtil.java | 71 ++-- .../hadoop/hbase/io/hfile/LruBlockCache.java | 15 +- .../io/hfile/bucket/BucketAllocator.java | 5 +- .../hbase/io/hfile/bucket/BucketCache.java | 21 +- .../apache/hadoop/hbase/ipc/RpcServer.java | 7 +- .../hbase/monitoring/MonitoredTaskImpl.java | 8 +- .../apache/hadoop/hbase/util/JSONBean.java | 190 +++++----- .../hadoop/hbase/wal/WALPrettyPrinter.java | 8 +- .../hadoop/hbase/PerformanceEvaluation.java | 17 +- .../hbase/TestPerformanceEvaluation.java | 9 +- hbase-testing-util/pom.xml | 4 + pom.xml | 6 + 23 files changed, 898 insertions(+), 188 deletions(-) create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/LongAdder.java create mode 100644 hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index 26bf67f18a..93e3b294dd 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -252,6 +252,10 @@ junit test + + org.apache.hbase.thirdparty + hbase-shaded-gson + diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java index 66b809b7c9..12a0a12a67 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/JsonMapper.java @@ -18,12 +18,12 @@ */ package org.apache.hadoop.hbase.util; -import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; import java.util.Map; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hbase.thirdparty.com.google.gson.Gson; /** * Utility class for converting objects to JSON @@ -34,12 +34,13 @@ public final class JsonMapper { private JsonMapper() { } - private static final ObjectMapper MAPPER = new ObjectMapper(); + private static final Gson GSON = GsonUtil.createGson().create(); - public static String writeMapAsString(Map map) throws IOException { + public static String writeMapAsString(Map map) throws IOException { return writeObjectAsString(map); } - public static String writeObjectAsString(Object object) throws IOException { - return MAPPER.writeValueAsString(object); + + public static String writeObjectAsString(Object object) throws IOException { + return GSON.toJson(object); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index 5e3fec1539..81e0646181 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -27,11 +27,10 @@ import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.Assert; import org.junit.Test; -import com.fasterxml.jackson.databind.ObjectMapper; import java.io.IOException; +import java.lang.reflect.Type; import java.nio.ByteBuffer; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; @@ -62,9 +61,9 @@ import org.apache.hadoop.hbase.filter.ValueFilter; import org.apache.hadoop.hbase.filter.WhileMatchFilter; import org.apache.hadoop.hbase.util.BuilderStyleTest; import org.apache.hadoop.hbase.util.Bytes; - -import org.junit.Assert; -import org.junit.Test; +import org.apache.hadoop.hbase.util.GsonUtil; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.reflect.TypeToken; import org.junit.experimental.categories.Category; /** @@ -78,7 +77,7 @@ public class TestOperation { private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); private static byte [] VALUE = Bytes.toBytes("testValue"); - private static ObjectMapper mapper = new ObjectMapper(); + private static Gson GSON = GsonUtil.createGson().create(); private static List TS_LIST = Arrays.asList(2L, 3L, 5L); private static TimestampsFilter TS_FILTER = new TimestampsFilter(TS_LIST); @@ -304,7 +303,9 @@ public class TestOperation { scan.addColumn(FAMILY, QUALIFIER); // get its JSON representation, and parse it String json = scan.toJSON(); - Map parsedJSON = mapper.readValue(json, HashMap.class); + Type typeOfHashMap = new TypeToken>() { + }.getType(); + Map parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); @@ -322,7 +323,7 @@ public class TestOperation { get.addColumn(FAMILY, QUALIFIER); // get its JSON representation, and parse it json = get.toJSON(); - parsedJSON = mapper.readValue(json, HashMap.class); + parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); @@ -340,7 +341,7 @@ public class TestOperation { put.add(FAMILY, QUALIFIER, VALUE); // get its JSON representation, and parse it json = put.toJSON(); - parsedJSON = mapper.readValue(json, HashMap.class); + parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row assertEquals("row absent in Put.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); @@ -354,14 +355,14 @@ public class TestOperation { Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()", - VALUE.length, kvMap.get("vlen")); + VALUE.length, ((Number) kvMap.get("vlen")).intValue()); // produce a Delete operation Delete delete = new Delete(ROW); delete.deleteColumn(FAMILY, QUALIFIER); // get its JSON representation, and parse it json = delete.toJSON(); - parsedJSON = mapper.readValue(json, HashMap.class); + parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row assertEquals("row absent in Delete.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index 18f64c5d8a..69ac706179 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -277,6 +277,10 @@ jackson-mapper-asl provided + + org.apache.hbase.thirdparty + hbase-shaded-gson + com.fasterxml.jackson.jaxrs jackson-jaxrs-json-provider diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java new file mode 100644 index 0000000000..d1c65ef78c --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/GsonUtil.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.util; + +import java.io.IOException; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder; +import org.apache.hbase.thirdparty.com.google.gson.LongSerializationPolicy; +import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonReader; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; + +/** + * Helper class for gson. + */ +@InterfaceAudience.Private +public final class GsonUtil { + + private GsonUtil() { + } + + /** + * Create a builder which is used to create a Gson instance. + *
<p/>
+ * Will set some common configs for the builder. + */ + public static GsonBuilder createGson() { + return new GsonBuilder().setLongSerializationPolicy(LongSerializationPolicy.STRING) + .registerTypeAdapter(LongAdder.class, new TypeAdapter() { + + @Override + public void write(JsonWriter out, LongAdder value) throws IOException { + out.value(value.longValue()); + } + + @Override + public LongAdder read(JsonReader in) throws IOException { + LongAdder value = new LongAdder(); + value.add(in.nextLong()); + return value; + } + }); + } +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/LongAdder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/LongAdder.java new file mode 100644 index 0000000000..b8dc35bdc2 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/LongAdder.java @@ -0,0 +1,230 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package org.apache.hadoop.hbase.util; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.io.Serializable; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; +import org.apache.hadoop.hbase.classification.InterfaceStability; + +/** + * One or more variables that together maintain an initially zero + * {@code long} sum. When updates (method {@link #add}) are contended + * across threads, the set of variables may grow dynamically to reduce + * contention. Method {@link #sum} (or, equivalently, {@link + * #longValue}) returns the current total combined across the + * variables maintaining the sum. + * + *
<p>
This class is usually preferable to {@link AtomicLong} when + * multiple threads update a common sum that is used for purposes such + * as collecting statistics, not for fine-grained synchronization + * control. Under low update contention, the two classes have similar + * characteristics. But under high contention, expected throughput of + * this class is significantly higher, at the expense of higher space + * consumption. + * + *
<p>
This class extends {@link Number}, but does not define + * methods such as {@code equals}, {@code hashCode} and {@code + * compareTo} because instances are expected to be mutated, and so are + * not useful as collection keys. + * + *
<p>
jsr166e note: This class is targeted to be placed in + * java.util.concurrent.atomic. + * + * @since 1.8 + */ +@InterfaceAudience.Public +@InterfaceStability.Stable +public class LongAdder extends Striped64 implements Serializable { + private static final long serialVersionUID = 7249069246863182397L; + + /** + * Version of plus for use in retryUpdate + */ + @Override + final long fn(long v, long x) { return v + x; } + + /** + * Creates a new adder with initial sum of zero. + */ + public LongAdder() { + } + + /** + * Adds the given value. + * + * @param x the value to add + */ + public void add(long x) { + Cell[] as; long b, v; int[] hc; Cell a; int n; + if ((as = cells) != null || !casBase(b = base, b + x)) { + boolean uncontended = true; + if ((hc = threadHashCode.get()) == null || + as == null || (n = as.length) < 1 || + (a = as[(n - 1) & hc[0]]) == null || + !(uncontended = a.cas(v = a.value, v + x))) + retryUpdate(x, hc, uncontended); + } + } + + /** + * Equivalent to {@code add(1)}. + */ + public void increment() { + add(1L); + } + + /** + * Equivalent to {@code add(-1)}. + */ + public void decrement() { + add(-1L); + } + + /** + * Returns the current sum. The returned value is NOT an + * atomic snapshot; invocation in the absence of concurrent + * updates returns an accurate result, but concurrent updates that + * occur while the sum is being calculated might not be + * incorporated. + * + * @return the sum + */ + public long sum() { + long sum = base; + Cell[] as = cells; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + sum += a.value; + } + } + return sum; + } + + /** + * Resets variables maintaining the sum to zero. This method may + * be a useful alternative to creating a new adder, but is only + * effective if there are no concurrent updates. Because this + * method is intrinsically racy, it should only be used when it is + * known that no threads are concurrently updating. + */ + public void reset() { + internalReset(0L); + } + + /** + * Equivalent in effect to {@link #sum} followed by {@link + * #reset}. This method may apply for example during quiescent + * points between multithreaded computations. If there are + * updates concurrent with this method, the returned value is + * not guaranteed to be the final value occurring before + * the reset. + * + * @return the sum + */ + public long sumThenReset() { + long sum = base; + Cell[] as = cells; + base = 0L; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) { + sum += a.value; + a.value = 0L; + } + } + } + return sum; + } + + /** + * Returns the String representation of the {@link #sum}. + * @return the String representation of the {@link #sum} + */ + @Override + public String toString() { + return Long.toString(sum()); + } + + /** + * Equivalent to {@link #sum}. + * + * @return the sum + */ + @Override + public long longValue() { + return sum(); + } + + /** + * Returns the {@link #sum} as an {@code int} after a narrowing + * primitive conversion. + */ + @Override + public int intValue() { + return (int)sum(); + } + + /** + * Returns the {@link #sum} as a {@code float} + * after a widening primitive conversion. + */ + @Override + public float floatValue() { + return (float)sum(); + } + + /** + * Returns the {@link #sum} as a {@code double} after a widening + * primitive conversion. 
+ */ + @Override + public double doubleValue() { + return (double)sum(); + } + + private void writeObject(ObjectOutputStream s) throws IOException { + s.defaultWriteObject(); + s.writeLong(sum()); + } + + private void readObject(ObjectInputStream s) + throws IOException, ClassNotFoundException { + s.defaultReadObject(); + busy = 0; + cells = null; + base = s.readLong(); + } + +} diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java new file mode 100644 index 0000000000..32aad13cf7 --- /dev/null +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Striped64.java @@ -0,0 +1,357 @@ +/** + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package org.apache.hadoop.hbase.util; + +import java.util.Random; + +import org.apache.hadoop.hbase.classification.InterfaceAudience; + +/** + * A package-local class holding common representation and mechanics + * for classes supporting dynamic striping on 64bit values. The class + * extends Number so that concrete subclasses must publicly do so. + */ +@InterfaceAudience.Private +abstract class Striped64 extends Number { + /* + * This class maintains a lazily-initialized table of atomically + * updated variables, plus an extra "base" field. The table size + * is a power of two. Indexing uses masked per-thread hash codes. + * Nearly all declarations in this class are package-private, + * accessed directly by subclasses. + * + * Table entries are of class Cell; a variant of AtomicLong padded + * to reduce cache contention on most processors. Padding is + * overkill for most Atomics because they are usually irregularly + * scattered in memory and thus don't interfere much with each + * other. But Atomic objects residing in arrays will tend to be + * placed adjacent to each other, and so will most often share + * cache lines (with a huge negative performance impact) without + * this precaution. + * + * In part because Cells are relatively large, we avoid creating + * them until they are needed. When there is no contention, all + * updates are made to the base field. Upon first contention (a + * failed CAS on base update), the table is initialized to size 2. + * The table size is doubled upon further contention until + * reaching the nearest power of two greater than or equal to the + * number of CPUS. Table slots remain empty (null) until they are + * needed. + * + * A single spinlock ("busy") is used for initializing and + * resizing the table, as well as populating slots with new Cells. 
+ * There is no need for a blocking lock; when the lock is not + * available, threads try other slots (or the base). During these + * retries, there is increased contention and reduced locality, + * which is still better than alternatives. + * + * Per-thread hash codes are initialized to random values. + * Contention and/or table collisions are indicated by failed + * CASes when performing an update operation (see method + * retryUpdate). Upon a collision, if the table size is less than + * the capacity, it is doubled in size unless some other thread + * holds the lock. If a hashed slot is empty, and lock is + * available, a new Cell is created. Otherwise, if the slot + * exists, a CAS is tried. Retries proceed by "double hashing", + * using a secondary hash (Marsaglia XorShift) to try to find a + * free slot. + * + * The table size is capped because, when there are more threads + * than CPUs, supposing that each thread were bound to a CPU, + * there would exist a perfect hash function mapping threads to + * slots that eliminates collisions. When we reach capacity, we + * search for this mapping by randomly varying the hash codes of + * colliding threads. Because search is random, and collisions + * only become known via CAS failures, convergence can be slow, + * and because threads are typically not bound to CPUS forever, + * may not occur at all. However, despite these limitations, + * observed contention rates are typically low in these cases. + * + * It is possible for a Cell to become unused when threads that + * once hashed to it terminate, as well as in the case where + * doubling the table causes no thread to hash to it under + * expanded mask. We do not try to detect or remove such cells, + * under the assumption that for long-running instances, observed + * contention levels will recur, so the cells will eventually be + * needed again; and for short-lived ones, it does not matter. + */ + + /** + * Padded variant of AtomicLong supporting only raw accesses plus CAS. + * The value field is placed between pads, hoping that the JVM doesn't + * reorder them. + * + * JVM intrinsics note: It would be possible to use a release-only + * form of CAS here, if it were provided. + */ + static final class Cell { + volatile long p0, p1, p2, p3, p4, p5, p6; + volatile long value; + volatile long q0, q1, q2, q3, q4, q5, q6; + Cell(long x) { value = x; } + + final boolean cas(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, valueOffset, cmp, val); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long valueOffset; + static { + try { + UNSAFE = getUnsafe(); + Class ak = Cell.class; + valueOffset = UNSAFE.objectFieldOffset + (ak.getDeclaredField("value")); + } catch (Exception e) { + throw new Error(e); + } + } + + } + + /** + * ThreadLocal holding a single-slot int array holding hash code. + * Unlike the JDK8 version of this class, we use a suboptimal + * int[] representation to avoid introducing a new type that can + * impede class-unloading when ThreadLocals are not removed. + */ + static final ThreadLocal threadHashCode = new ThreadLocal(); + + /** + * Generator of new random hash codes + */ + static final Random rng = new Random(); + + /** Number of CPUS, to place bound on table size */ + static final int NCPU = Runtime.getRuntime().availableProcessors(); + + /** + * Table of cells. When non-null, size is a power of 2. 
+ */ + transient volatile Cell[] cells; + + /** + * Base value, used mainly when there is no contention, but also as + * a fallback during table initialization races. Updated via CAS. + */ + transient volatile long base; + + /** + * Spinlock (locked via CAS) used when resizing and/or creating Cells. + */ + transient volatile int busy; + + /** + * Package-private default constructor + */ + Striped64() { + } + + /** + * CASes the base field. + */ + final boolean casBase(long cmp, long val) { + return UNSAFE.compareAndSwapLong(this, baseOffset, cmp, val); + } + + /** + * CASes the busy field from 0 to 1 to acquire lock. + */ + final boolean casBusy() { + return UNSAFE.compareAndSwapInt(this, busyOffset, 0, 1); + } + + /** + * Computes the function of current and new value. Subclasses + * should open-code this update function for most uses, but the + * virtualized form is needed within retryUpdate. + * + * @param currentValue the current value (of either base or a cell) + * @param newValue the argument from a user update call + * @return result of the update function + */ + abstract long fn(long currentValue, long newValue); + + /** + * Handles cases of updates involving initialization, resizing, + * creating new Cells, and/or contention. See above for + * explanation. This method suffers the usual non-modularity + * problems of optimistic retry code, relying on rechecked sets of + * reads. + * + * @param x the value + * @param hc the hash code holder + * @param wasUncontended false if CAS failed before call + */ + final void retryUpdate(long x, int[] hc, boolean wasUncontended) { + int h; + if (hc == null) { + threadHashCode.set(hc = new int[1]); // Initialize randomly + int r = rng.nextInt(); // Avoid zero to allow xorShift rehash + h = hc[0] = (r == 0) ? 1 : r; + } + else + h = hc[0]; + boolean collide = false; // True if last slot nonempty + for (;;) { + Cell[] as; Cell a; int n; long v; + if ((as = cells) != null && (n = as.length) > 0) { + if ((a = as[(n - 1) & h]) == null) { + if (busy == 0) { // Try to attach new Cell + Cell r = new Cell(x); // Optimistically create + if (busy == 0 && casBusy()) { + boolean created = false; + try { // Recheck under lock + Cell[] rs; int m, j; + if ((rs = cells) != null && + (m = rs.length) > 0 && + rs[j = (m - 1) & h] == null) { + rs[j] = r; + created = true; + } + } finally { + busy = 0; + } + if (created) + break; + continue; // Slot is now non-empty + } + } + collide = false; + } + else if (!wasUncontended) // CAS already known to fail + wasUncontended = true; // Continue after rehash + else if (a.cas(v = a.value, fn(v, x))) + break; + else if (n >= NCPU || cells != as) + collide = false; // At max size or stale + else if (!collide) + collide = true; + else if (busy == 0 && casBusy()) { + try { + if (cells == as) { // Expand table unless stale + Cell[] rs = new Cell[n << 1]; + for (int i = 0; i < n; ++i) + rs[i] = as[i]; + cells = rs; + } + } finally { + busy = 0; + } + collide = false; + continue; // Retry with expanded table + } + h ^= h << 13; // Rehash + h ^= h >>> 17; + h ^= h << 5; + hc[0] = h; // Record index for next time + } + else if (busy == 0 && cells == as && casBusy()) { + boolean init = false; + try { // Initialize table + if (cells == as) { + Cell[] rs = new Cell[2]; + rs[h & 1] = new Cell(x); + cells = rs; + init = true; + } + } finally { + busy = 0; + } + if (init) + break; + } + else if (casBase(v = base, fn(v, x))) + break; // Fall back on using base + } + } + + + /** + * Sets base and all cells to the given value. 
+ */ + final void internalReset(long initialValue) { + Cell[] as = cells; + base = initialValue; + if (as != null) { + int n = as.length; + for (int i = 0; i < n; ++i) { + Cell a = as[i]; + if (a != null) + a.value = initialValue; + } + } + } + + // Unsafe mechanics + private static final sun.misc.Unsafe UNSAFE; + private static final long baseOffset; + private static final long busyOffset; + static { + try { + UNSAFE = getUnsafe(); + Class sk = Striped64.class; + baseOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("base")); + busyOffset = UNSAFE.objectFieldOffset + (sk.getDeclaredField("busy")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException tryReflectionInstead) {} + try { + return java.security.AccessController.doPrivileged + (new java.security.PrivilegedExceptionAction() { + @Override + public sun.misc.Unsafe run() throws Exception { + Class k = sun.misc.Unsafe.class; + for (java.lang.reflect.Field f : k.getDeclaredFields()) { + f.setAccessible(true); + Object x = f.get(null); + if (k.isInstance(x)) + return k.cast(x); + } + throw new NoSuchFieldError("the Unsafe"); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } +} \ No newline at end of file diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml index 9db6f94a97..ce13032524 100644 --- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml +++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml @@ -1665,6 +1665,25 @@ Copyright 2005 Sun Microsystems, Inc. and portions Copyright Apache Software Fou + + + com.google.errorprone + error_prone_annotations + + + Google + http://www.google.com + + + + + Apache License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + repo + + + + org.jamon diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 388ecbbd68..e892bae12b 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -484,6 +484,10 @@ org.apache.commons commons-math + + org.apache.hbase.thirdparty + hbase-shaded-gson + log4j log4j diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java index 0d7d99ab8c..872b874282 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java @@ -163,7 +163,6 @@ public class JMXJsonServlet extends HttpServlet { try { jsonpcb = checkCallbackName(request.getParameter(CALLBACK_PARAM)); writer = response.getWriter(); - beanWriter = this.jsonBeanWriter.open(writer); // "callback" parameter implies JSONP outpout if (jsonpcb != null) { response.setContentType("application/javascript; charset=utf8"); @@ -171,6 +170,7 @@ public class JMXJsonServlet extends HttpServlet { } else { response.setContentType("application/json; charset=utf8"); } + beanWriter = this.jsonBeanWriter.open(writer); // Should we output description on each attribute and bean? 
String tmpStr = request.getParameter(INCLUDE_DESCRIPTION); boolean description = tmpStr != null && tmpStr.length() > 0; @@ -204,9 +204,11 @@ public class JMXJsonServlet extends HttpServlet { response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } } finally { - if (beanWriter != null) beanWriter.close(); + if (beanWriter != null) { + beanWriter.close(); + } if (jsonpcb != null) { - writer.write(");"); + writer.write(");"); } if (writer != null) { writer.close(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java index c2eaf4a6fb..e41d70776a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AgeSnapshot.java @@ -25,11 +25,10 @@ import org.apache.hadoop.hbase.util.FastLongHistogram; * Snapshot of block cache age in cache. * This object is preferred because we can control how it is serialized out when JSON'ing. */ -@JsonIgnoreProperties({"ageHistogram", "snapshot"}) public class AgeSnapshot { - private final FastLongHistogram ageHistogram; - private final long[] quantiles; + private transient final FastLongHistogram ageHistogram; + private transient final long[] quantiles; AgeSnapshot(final FastLongHistogram ageHistogram) { this.ageHistogram = ageHistogram; diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java index f1b6dfa1ab..446c1de163 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/BlockCacheUtil.java @@ -17,11 +17,6 @@ */ package org.apache.hadoop.hbase.io.hfile; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; -import com.fasterxml.jackson.core.JsonGenerationException; -import com.fasterxml.jackson.databind.JsonMappingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; import java.io.IOException; import java.util.NavigableMap; import java.util.NavigableSet; @@ -31,6 +26,13 @@ import java.util.concurrent.ConcurrentSkipListSet; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.FastLongHistogram; +import org.apache.hadoop.hbase.util.GsonUtil; + + +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.TypeAdapter; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonReader; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; /** * Utilty for aggregating counts in CachedBlocks and toString/toJSON CachedBlocks and BlockCaches. @@ -41,12 +43,29 @@ public class BlockCacheUtil { /** * Needed generating JSON. 
*/ - private static final ObjectMapper MAPPER = new ObjectMapper(); - static { - MAPPER.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false); - MAPPER.configure(SerializationFeature.FLUSH_AFTER_WRITE_VALUE, true); - MAPPER.configure(SerializationFeature.INDENT_OUTPUT, true); - } + private static final Gson GSON = GsonUtil.createGson() + .registerTypeAdapter(FastLongHistogram.class, new TypeAdapter() { + + @Override + public void write(JsonWriter out, FastLongHistogram value) throws IOException { + AgeSnapshot snapshot = new AgeSnapshot(value); + out.beginObject(); + out.name("mean").value(snapshot.getMean()); + out.name("min").value(snapshot.getMin()); + out.name("max").value(snapshot.getMax()); + out.name("75thPercentile").value(snapshot.get75thPercentile()); + out.name("95thPercentile").value(snapshot.get95thPercentile()); + out.name("98thPercentile").value(snapshot.get98thPercentile()); + out.name("99thPercentile").value(snapshot.get99thPercentile()); + out.name("999thPercentile").value(snapshot.get999thPercentile()); + out.endObject(); + } + + @Override + public FastLongHistogram read(JsonReader in) throws IOException { + throw new UnsupportedOperationException(); + } + }).setPrettyPrinting().create(); /** * @param cb @@ -93,15 +112,10 @@ public class BlockCacheUtil { } /** - * @param filename - * @param blocks * @return A JSON String of filename and counts of blocks - * @throws JsonGenerationException - * @throws JsonMappingException - * @throws IOException */ public static String toJSON(final String filename, final NavigableSet blocks) - throws JsonGenerationException, JsonMappingException, IOException { + throws IOException { CachedBlockCountsPerFile counts = new CachedBlockCountsPerFile(filename); for (CachedBlock cb: blocks) { counts.count++; @@ -112,31 +126,21 @@ public class BlockCacheUtil { counts.sizeData += cb.getSize(); } } - return MAPPER.writeValueAsString(counts); + return GSON.toJson(counts); } /** - * @param cbsbf * @return JSON string of cbsf aggregated - * @throws JsonGenerationException - * @throws JsonMappingException - * @throws IOException */ - public static String toJSON(final CachedBlocksByFile cbsbf) - throws JsonGenerationException, JsonMappingException, IOException { - return MAPPER.writeValueAsString(cbsbf); + public static String toJSON(final CachedBlocksByFile cbsbf) throws IOException { + return GSON.toJson(cbsbf); } /** - * @param bc * @return JSON string of bc content. - * @throws JsonGenerationException - * @throws JsonMappingException - * @throws IOException */ - public static String toJSON(final BlockCache bc) - throws JsonGenerationException, JsonMappingException, IOException { - return MAPPER.writeValueAsString(bc); + public static String toJSON(final BlockCache bc) throws IOException { + return GSON.toJson(bc); } /** @@ -172,7 +176,6 @@ public class BlockCacheUtil { * This is different than metrics in that it is stats on current state of a cache. * See getLoadedCachedBlocksByFile */ - @JsonIgnoreProperties({"cachedBlockStatsByFile"}) public static class CachedBlocksByFile { private int count; private int dataBlockCount; @@ -200,7 +203,7 @@ public class BlockCacheUtil { /** * Map by filename. use concurent utils because we want our Map and contained blocks sorted. 
*/ - private NavigableMap> cachedBlockByFile = + private transient NavigableMap> cachedBlockByFile = new ConcurrentSkipListMap>(); FastLongHistogram hist = new FastLongHistogram(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java index b6617d81b0..78bb193645 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/LruBlockCache.java @@ -100,7 +100,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder; * to the relative sizes and usage. */ @InterfaceAudience.Private -@JsonIgnoreProperties({"encodingCountsForTest"}) public class LruBlockCache implements ResizableBlockCache, HeapSize { private static final Log LOG = LogFactory.getLog(LruBlockCache.class); @@ -157,21 +156,23 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { private static final long DEFAULT_MAX_BLOCK_SIZE = 16L * 1024L * 1024L; /** Concurrent map (the cache) */ - private final Map map; + private transient final Map map; /** Eviction lock (locked when eviction in process) */ - private final ReentrantLock evictionLock = new ReentrantLock(true); + private transient final ReentrantLock evictionLock = new ReentrantLock(true); private final long maxBlockSize; /** Volatile boolean to track if we are in an eviction process or not */ private volatile boolean evictionInProgress = false; /** Eviction thread */ - private final EvictionThread evictionThread; + private transient final EvictionThread evictionThread; /** Statistics thread schedule pool (for heavy debugging, could remove) */ - private final ScheduledExecutorService scheduleThreadPool = Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setNameFormat("LruBlockCacheStatsExecutor").setDaemon(true).build()); + private transient final ScheduledExecutorService scheduleThreadPool = + Executors.newScheduledThreadPool(1, + new ThreadFactoryBuilder().setNameFormat("LruBlockCacheStatsExecutor") + .setDaemon(true).build()); /** Current size of cache */ private final AtomicLong size; @@ -216,7 +217,7 @@ public class LruBlockCache implements ResizableBlockCache, HeapSize { private boolean forceInMemory; /** Where to send victims (blocks evicted/missing from the cache) */ - private BlockCache victimHandler = null; + private transient BlockCache victimHandler = null; /** * Default constructor. Specify maximum size and expected average block diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java index 4e04c98725..3c17d64395 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.io.hfile.bucket; -import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import java.util.Arrays; import java.util.Comparator; import java.util.HashSet; @@ -52,11 +51,9 @@ import com.google.common.primitives.Ints; * This class is not thread safe. 
*/ @InterfaceAudience.Private -@JsonIgnoreProperties({"indexStatistics", "freeSize", "usedSize"}) public final class BucketAllocator { private static final Log LOG = LogFactory.getLog(BucketAllocator.class); - @JsonIgnoreProperties({"completelyFree", "uninstantiated"}) public final static class Bucket { private long baseOffset; private int itemAllocationSize, sizeIndex; @@ -308,7 +305,7 @@ public final class BucketAllocator { private Bucket[] buckets; private BucketSizeInfo[] bucketSizeInfos; private final long totalSize; - private long usedSize = 0; + private transient long usedSize = 0; BucketAllocator(long availableSpace, int[] bucketSizes) throws BucketAllocatorException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java index b7bab31639..857e8520a2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java @@ -115,14 +115,14 @@ public class BucketCache implements BlockCache, HeapSize { final static int DEFAULT_WRITER_QUEUE_ITEMS = 64; // Store/read block data - final IOEngine ioEngine; + transient final IOEngine ioEngine; // Store the block in this map before writing it to cache @VisibleForTesting - final ConcurrentMap ramCache; + transient final ConcurrentMap ramCache; // In this map, store the block's meta data like offset, length @VisibleForTesting - ConcurrentMap backingMap; + transient ConcurrentMap backingMap; /** * Flag if the cache is enabled or not... We shut it off if there are IO @@ -139,14 +139,14 @@ public class BucketCache implements BlockCache, HeapSize { * to the BucketCache. It then updates the ramCache and backingMap accordingly. */ @VisibleForTesting - final ArrayList> writerQueues = + transient final ArrayList> writerQueues = new ArrayList>(); @VisibleForTesting - final WriterThread[] writerThreads; + transient final WriterThread[] writerThreads; /** Volatile boolean to track if free space is in process or not */ private volatile boolean freeInProgress = false; - private final Lock freeSpaceLock = new ReentrantLock(); + private transient final Lock freeSpaceLock = new ReentrantLock(); private UniqueIndexMap deserialiserMap = new UniqueIndexMap(); @@ -185,7 +185,7 @@ public class BucketCache implements BlockCache, HeapSize { * The purpose of this is to avoid freeing the block which is being read. 
*/ @VisibleForTesting - final IdReadWriteLock offsetLock = new IdReadWriteLock(); + transient final IdReadWriteLock offsetLock = new IdReadWriteLock(); private final NavigableSet blocksByHFile = new ConcurrentSkipListSet(new Comparator() { @@ -206,11 +206,12 @@ public class BucketCache implements BlockCache, HeapSize { }); /** Statistics thread schedule pool (for heavy debugging, could remove) */ - private final ScheduledExecutorService scheduleThreadPool = Executors.newScheduledThreadPool(1, - new ThreadFactoryBuilder().setNameFormat("BucketCacheStatsExecutor").setDaemon(true).build()); + private transient final ScheduledExecutorService scheduleThreadPool = + Executors.newScheduledThreadPool(1, + new ThreadFactoryBuilder().setNameFormat("BucketCacheStatsExecutor").setDaemon(true).build()); // Allocate or free space for the block - private BucketAllocator bucketAllocator; + private transient BucketAllocator bucketAllocator; public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, int writerThreadNum, int writerQLen, String persistencePath) throws FileNotFoundException, diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java index cdd86636be..5990bfc5ce 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.ipc; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION; -import com.fasterxml.jackson.databind.ObjectMapper; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.BlockingService; @@ -128,6 +127,7 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Counter; +import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.BytesWritable; @@ -145,6 +145,7 @@ import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.StringUtils; +import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.htrace.TraceInfo; /** @@ -288,7 +289,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { private static final int DEFAULT_WARN_RESPONSE_TIME = 10000; // milliseconds private static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024; - private static final ObjectMapper MAPPER = new ObjectMapper(); + protected static final Gson GSON = GsonUtil.createGson().create(); protected static final int DEFAULT_TRACE_LOG_MAX_LENGTH = 1000; protected static final String TRACE_LOG_MAX_LENGTH = "hbase.ipc.trace.log.max.length"; @@ -2520,7 +2521,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver { responseInfo.put("multi.mutations", numMutations); responseInfo.put("multi.servicecalls", numServiceCalls); } - LOG.warn("(response" + tag + "): " + MAPPER.writeValueAsString(responseInfo)); + LOG.warn("(response" + tag + "): " + GSON.toJson(responseInfo)); } /** Stops the service. No new calls will be handled after this is called. 
*/ diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java index 7174b3f841..41705dbe98 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/MonitoredTaskImpl.java @@ -18,7 +18,6 @@ */ package org.apache.hadoop.hbase.monitoring; -import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hadoop.hbase.classification.InterfaceAudience; import java.io.IOException; @@ -28,6 +27,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import org.apache.hadoop.hbase.util.GsonUtil; +import org.apache.hbase.thirdparty.com.google.gson.Gson; + @InterfaceAudience.Private class MonitoredTaskImpl implements MonitoredTask { private long startTime; @@ -43,7 +45,7 @@ class MonitoredTaskImpl implements MonitoredTask { private boolean journalEnabled = false; private List journal; - private static final ObjectMapper MAPPER = new ObjectMapper(); + private static final Gson GSON = GsonUtil.createGson().create(); public MonitoredTaskImpl() { startTime = System.currentTimeMillis(); @@ -210,7 +212,7 @@ class MonitoredTaskImpl implements MonitoredTask { @Override public String toJSON() throws IOException { - return MAPPER.writeValueAsString(toMap()); + return GSON.toJson(toMap()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java index 79c510d323..88a49957dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java @@ -16,9 +16,6 @@ */ package org.apache.hadoop.hbase.util; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonGenerator; - import java.io.BufferedWriter; import java.io.Closeable; import java.io.IOException; @@ -47,54 +44,77 @@ import javax.management.openmbean.TabularData; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; /** * Utility for doing JSON and MBeans. */ public class JSONBean { private static final Log LOG = LogFactory.getLog(JSONBean.class); - private final JsonFactory jsonFactory; - - public JSONBean() { - this.jsonFactory = new JsonFactory(); - } + private static final Gson GSON = GsonUtil.createGson().create(); /** * Use dumping out mbeans as JSON. */ public interface Writer extends Closeable { + void write(final String key, final String value) throws IOException; - int write(final MBeanServer mBeanServer, ObjectName qry, String attribute, - final boolean description) throws IOException; + + int write(final MBeanServer mBeanServer, final ObjectName qry, final String attribute, + final boolean description) throws IOException; + void flush() throws IOException; } + /** + * Notice that, closing the return {@link Writer} will not close the {@code writer} passed in, you + * still need to close the {@code writer} by yourself. + *
<p/>
+ * This is because that, we can only finish the json after you call {@link Writer#close()}. So if + * we just close the {@code writer}, you can write nothing after finished the json. + */ public Writer open(final PrintWriter writer) throws IOException { - final JsonGenerator jg = jsonFactory.createGenerator(writer); - jg.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET); - jg.useDefaultPrettyPrinter(); - jg.writeStartObject(); + final JsonWriter jsonWriter = GSON.newJsonWriter(new java.io.Writer() { + + @Override + public void write(char[] cbuf, int off, int len) throws IOException { + writer.write(cbuf, off, len); + } + + @Override + public void flush() throws IOException { + writer.flush(); + } + + @Override + public void close() throws IOException { + // do nothing + } + }); + jsonWriter.setIndent(" "); + jsonWriter.beginObject(); return new Writer() { @Override public void flush() throws IOException { - jg.flush(); + jsonWriter.flush(); } @Override public void close() throws IOException { - jg.close(); + jsonWriter.endObject(); + jsonWriter.close(); } @Override public void write(String key, String value) throws IOException { - jg.writeStringField(key, value); + jsonWriter.name(key).value(value); } @Override public int write(MBeanServer mBeanServer, ObjectName qry, String attribute, - boolean description) - throws IOException { - return JSONBean.write(jg, mBeanServer, qry, attribute, description); + boolean description) throws IOException { + return JSONBean.write(jsonWriter, mBeanServer, qry, attribute, description); } }; } @@ -107,14 +127,13 @@ public class JSONBean { * @return Return non-zero if failed to find bean. 0 * @throws IOException */ - private static int write(final JsonGenerator jg, - final MBeanServer mBeanServer, ObjectName qry, String attribute, - final boolean description) - throws IOException { - LOG.trace("Listing beans for "+qry); + private static int write(final JsonWriter writer, final MBeanServer mBeanServer, + final ObjectName qry, final String attribute, final boolean description) throws IOException { + + LOG.trace("Listing beans for " + qry); Set names = null; names = mBeanServer.queryNames(qry, null); - jg.writeArrayFieldStart("beans"); + writer.name("beans").beginArray(); Iterator it = names.iterator(); while (it.hasNext()) { ObjectName oname = it.next(); @@ -125,7 +144,9 @@ public class JSONBean { try { minfo = mBeanServer.getMBeanInfo(oname); code = minfo.getClassName(); - if (description) descriptionStr = minfo.getDescription(); + if (description) { + descriptionStr = minfo.getDescription(); + } String prs = ""; try { if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) { @@ -137,16 +158,16 @@ public class JSONBean { attributeinfo = mBeanServer.getAttribute(oname, prs); } } catch (RuntimeMBeanException e) { - // UnsupportedOperationExceptions happen in the normal course of business, - // so no need to log them as errors all the time. - if (e.getCause() instanceof UnsupportedOperationException) { - if (LOG.isTraceEnabled()) { - LOG.trace("Getting attribute " + prs + " of " + oname + " threw " + e); - } - } else { - LOG.error("Getting attribute " + prs + " of " + oname + " threw an exception", e); - } - return 0; + // UnsupportedOperationExceptions happen in the normal course of business, + // so no need to log them as errors all the time. 
+ if (e.getCause() instanceof UnsupportedOperationException) { + if (LOG.isTraceEnabled()) { + LOG.trace("Getting attribute " + prs + " of " + oname + " threw " + e); + } + } else { + LOG.error("Getting attribute " + prs + " of " + oname + " threw an exception", e); + } + return 0; } catch (AttributeNotFoundException e) { // If the modelerType attribute was not found, the class name is used // instead. @@ -187,39 +208,38 @@ public class JSONBean { continue; } - jg.writeStartObject(); - jg.writeStringField("name", oname.toString()); + writer.beginObject(); + writer.name("name").value(oname.toString()); if (description && descriptionStr != null && descriptionStr.length() > 0) { - jg.writeStringField("description", descriptionStr); + writer.name("description").value(descriptionStr); } - jg.writeStringField("modelerType", code); + writer.name("modelerType").value(code); if (attribute != null && attributeinfo == null) { - jg.writeStringField("result", "ERROR"); - jg.writeStringField("message", "No attribute with name " + attribute + " was found."); - jg.writeEndObject(); - jg.writeEndArray(); - jg.close(); + writer.name("result").value("ERROR"); + writer.name("message").value("No attribute with name " + attribute + " was found."); + writer.endObject(); + writer.endArray(); + writer.close(); return -1; } if (attribute != null) { - writeAttribute(jg, attribute, descriptionStr, attributeinfo); + writeAttribute(writer, attribute, descriptionStr, attributeinfo); } else { MBeanAttributeInfo[] attrs = minfo.getAttributes(); for (int i = 0; i < attrs.length; i++) { - writeAttribute(jg, mBeanServer, oname, description, attrs[i]); + writeAttribute(writer, mBeanServer, oname, description, attrs[i]); } } - jg.writeEndObject(); + writer.endObject(); } - jg.writeEndArray(); + writer.endArray(); return 0; } - private static void writeAttribute(final JsonGenerator jg, - final MBeanServer mBeanServer, ObjectName oname, - final boolean description, final MBeanAttributeInfo attr) - throws IOException { + private static void writeAttribute(final JsonWriter writer, final MBeanServer mBeanServer, + final ObjectName oname, final boolean description, final MBeanAttributeInfo attr) + throws IOException { if (!attr.isReadable()) { return; } @@ -277,71 +297,67 @@ public class JSONBean { return; } - writeAttribute(jg, attName, descriptionStr, value); + writeAttribute(writer, attName, descriptionStr, value); } - private static void writeAttribute(JsonGenerator jg, String attName, final String descriptionStr, - Object value) - throws IOException { + private static void writeAttribute(JsonWriter writer, String attName, String descriptionStr, + Object value) throws IOException { boolean description = false; if (descriptionStr != null && descriptionStr.length() > 0 && !attName.equals(descriptionStr)) { - description = true; - jg.writeFieldName(attName); - jg.writeStartObject(); - jg.writeFieldName("description"); - jg.writeString(descriptionStr); - jg.writeFieldName("value"); - writeObject(jg, description, value); - jg.writeEndObject(); + writer.name(attName); + writer.beginObject(); + writer.name("description").value(descriptionStr); + writer.name("value"); + writeObject(writer, value); + writer.endObject(); } else { - jg.writeFieldName(attName); - writeObject(jg, description, value); + writer.name(attName); + writeObject(writer, value); } } - private static void writeObject(final JsonGenerator jg, final boolean description, Object value) - throws IOException { - if(value == null) { - jg.writeNull(); + private static void 
writeObject(final JsonWriter writer, final Object value) throws IOException { + if (value == null) { + writer.nullValue(); } else { Class c = value.getClass(); if (c.isArray()) { - jg.writeStartArray(); + writer.beginArray(); int len = Array.getLength(value); for (int j = 0; j < len; j++) { Object item = Array.get(value, j); - writeObject(jg, description, item); + writeObject(writer, item); } - jg.writeEndArray(); + writer.endArray(); } else if(value instanceof Number) { Number n = (Number)value; double doubleValue = n.doubleValue(); if (Double.isNaN(doubleValue) || Double.isInfinite(doubleValue)) { - jg.writeString(n.toString()); + writer.value(n); } else { - jg.writeNumber(n.toString()); + writer.value(n.toString()); } } else if(value instanceof Boolean) { Boolean b = (Boolean)value; - jg.writeBoolean(b); + writer.value(b); } else if(value instanceof CompositeData) { CompositeData cds = (CompositeData)value; CompositeType comp = cds.getCompositeType(); Set keys = comp.keySet(); - jg.writeStartObject(); - for (String key: keys) { - writeAttribute(jg, key, null, cds.get(key)); + writer.beginObject(); + for (String key : keys) { + writeAttribute(writer, key, null, cds.get(key)); } - jg.writeEndObject(); + writer.endObject(); } else if(value instanceof TabularData) { TabularData tds = (TabularData)value; - jg.writeStartArray(); - for(Object entry : tds.values()) { - writeObject(jg, description, entry); + writer.beginArray(); + for (Object entry : tds.values()) { + writeObject(writer, entry); } - jg.writeEndArray(); + writer.endArray(); } else { - jg.writeString(value.toString()); + writer.value(value.toString()); } } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java index 182c985847..c440c079ee 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.wal; -import com.fasterxml.jackson.databind.ObjectMapper; import java.io.FileNotFoundException; import java.io.IOException; import java.io.PrintStream; @@ -36,6 +35,7 @@ import org.apache.commons.cli.ParseException; import org.apache.commons.cli.PosixParser; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; +import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader; // imports for things that haven't moved yet. 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index 182c985847..c440c079ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hbase.wal;

-import com.fasterxml.jackson.databind.ObjectMapper;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
@@ -36,6 +35,7 @@ import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.GsonUtil;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -50,6 +50,8 @@ import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader;
 // imports for things that haven't moved yet.
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hbase.thirdparty.com.google.gson.Gson;
+
 /**
  * WALPrettyPrinter prints the contents of a given WAL with a variety of
@@ -79,7 +81,7 @@ public class WALPrettyPrinter {
   // useful for programmatic capture of JSON output
   private PrintStream out;
   // for JSON encoding
-  private static final ObjectMapper MAPPER = new ObjectMapper();
+  private static final Gson GSON = GsonUtil.createGson().create();

   private long position;
@@ -313,7 +315,7 @@ public class WALPrettyPrinter {
         else
           out.print(",");
         // encode and print JSON
-        out.print(MAPPER.writeValueAsString(txn));
+        out.print(GSON.toJson(txn));
       } else {
         // Pretty output, complete with indentation by atomic action
         out.println("Sequence=" + txn.get("sequence") + " "
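The WALPrettyPrinter change is the simplest flavor of this migration: MAPPER.writeValueAsString(txn) becomes GSON.toJson(txn), and a plain Gson instance serializes Maps and Lists with no extra configuration. A hedged sketch of that usage (not from the patch; the txn contents below are invented stand-ins for the per-edit map the printer assembles):

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.util.GsonUtil;
    import org.apache.hbase.thirdparty.com.google.gson.Gson;

    public class WalJsonSketch {
      public static void main(String[] args) {
        Gson gson = GsonUtil.createGson().create();
        // Hypothetical stand-in for the per-edit map the printer builds.
        Map<String, Object> txn = new LinkedHashMap<String, Object>();
        txn.put("table", "TestTable");
        txn.put("region", "1588230740");
        txn.put("sequence", 42);
        System.out.println(gson.toJson(txn));  // one JSON object per WAL entry
      }
    }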
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 932f65567b..d9a78c7191 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -18,8 +18,6 @@
  */
 package org.apache.hadoop.hbase;

-import com.fasterxml.jackson.databind.MapperFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.lang.reflect.Constructor;
@@ -100,6 +98,7 @@ import org.apache.hadoop.util.ToolRunner;

 import com.yammer.metrics.core.Histogram;
 import com.yammer.metrics.stats.UniformSample;
+import org.apache.hbase.thirdparty.com.google.gson.Gson;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -125,10 +124,7 @@ import org.apache.htrace.impl.ProbabilitySampler;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
 public class PerformanceEvaluation extends Configured implements Tool {
   private static final Log LOG = LogFactory.getLog(PerformanceEvaluation.class.getName());
-  private static final ObjectMapper MAPPER = new ObjectMapper();
-  static {
-    MAPPER.configure(MapperFeature.SORT_PROPERTIES_ALPHABETICALLY, true);
-  }
+  private static final Gson GSON = GsonUtil.createGson().create();

   public static final String TABLE_NAME = "TestTable";
   public static final String FAMILY_NAME_BASE = "info";
@@ -289,8 +285,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       }
     };

-    ObjectMapper mapper = new ObjectMapper();
-    TestOptions opts = mapper.readValue(value.toString(), TestOptions.class);
+    TestOptions opts = GSON.fromJson(value.toString(), TestOptions.class);
     Configuration conf = HBaseConfiguration.create(context.getConfiguration());
     final Connection con = ConnectionFactory.createConnection(conf);
@@ -502,7 +497,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     TableMapReduceUtil.addDependencyJars(job);
     TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
       Histogram.class, // yammer metrics
-      ObjectMapper.class); // jackson-mapper-asl
+      Gson.class); // gson

     TableMapReduceUtil.initCredentials(job);
@@ -536,7 +531,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         TestOptions next = new TestOptions(opts);
         next.startRow = (j * perClientRows) + (i * (perClientRows/10));
         next.perClientRunRows = perClientRows / 10;
-        String s = MAPPER.writeValueAsString(next);
+        String s = GSON.toJson(next);
         LOG.info("maptask input=" + s);
         int hash = h.hash(Bytes.toBytes(s));
         m.put(hash, s);
@@ -1930,7 +1925,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       InterruptedException, ClassNotFoundException {
     // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do
     // the TestOptions introspection for us and dump the output in a readable format.
-    LOG.info(cmd.getSimpleName() + " test run options=" + MAPPER.writeValueAsString(opts));
+    LOG.info(cmd.getSimpleName() + " test run options=" + GSON.toJson(opts));
     try(Connection conn = ConnectionFactory.createConnection(getConf());
         Admin admin = conn.getAdmin()) {
       checkTable(admin, opts);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
index 394c9d8354..e718eac64b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
@@ -20,10 +20,11 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;

-import com.fasterxml.jackson.databind.ObjectMapper;
 import java.io.IOException;

 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.GsonUtil;
+import org.apache.hbase.thirdparty.com.google.gson.Gson;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -35,10 +36,10 @@ public class TestPerformanceEvaluation {
     PerformanceEvaluation.TestOptions options = new PerformanceEvaluation.TestOptions();
     assertTrue(!options.isAutoFlush());
     options.setAutoFlush(true);
-    ObjectMapper mapper = new ObjectMapper();
-    String optionsString = mapper.writeValueAsString(options);
+    Gson gson = GsonUtil.createGson().create();
+    String optionsString = gson.toJson(options);
     PerformanceEvaluation.TestOptions optionsDeserialized =
-      mapper.readValue(optionsString, PerformanceEvaluation.TestOptions.class);
+      gson.fromJson(optionsString, PerformanceEvaluation.TestOptions.class);
     assertTrue(optionsDeserialized.isAutoFlush());
   }
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index 77ec2ebaea..36b72663c4 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -164,6 +164,10 @@
     </dependency>
+    <dependency>
+      <groupId>org.codehaus.jackson</groupId>
+      <artifactId>jackson-mapper-asl</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-server</artifactId>
diff --git a/pom.xml b/pom.xml
index c11b42a3fd..0f3f3e5bf4 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1301,6 +1301,7 @@
     1.0.8
     2.11.6
     1.46
+    <hbase.shaded.gson.version>3.0.0</hbase.shaded.gson.version>
     3.1.1
     1.6
@@ -1769,6 +1770,11 @@
         <artifactId>jackson-databind</artifactId>
         <version>${jackson2.databind.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.apache.hbase.thirdparty</groupId>
+        <artifactId>hbase-shaded-gson</artifactId>
+        <version>${hbase.shaded.gson.version}</version>
+      </dependency>
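Every call site above obtains its Gson instance through GsonUtil.createGson().create(); the factory itself is added by this patch in hbase-common and its body is not shown in this excerpt. Purely as a hypothetical sketch of the shape those call sites imply (a builder is returned so callers can layer on extra configuration before create(); the real factory may register type adapters and other settings):

    import org.apache.hbase.thirdparty.com.google.gson.Gson;
    import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder;

    public final class GsonUtilSketch {
      private GsonUtilSketch() {
      }

      // Hypothetical: only the builder-returning shape is implied by the
      // call sites; the actual GsonUtil may configure more than this.
      public static GsonBuilder createGson() {
        return new GsonBuilder().disableHtmlEscaping();
      }

      public static void main(String[] args) {
        Gson gson = createGson().create();
        System.out.println(gson.toJson(new int[] { 1, 2, 3 }));  // [1,2,3]
      }
    }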