diff --git kafka-handler/README.md kafka-handler/README.md
index 753e3e3..e7761e3 100644
--- kafka-handler/README.md
+++ kafka-handler/README.md
@@ -50,6 +50,9 @@ ALTER TABLE
 SET TBLPROPERTIES (
   "kafka.serde.class" = "org.apache.hadoop.hive.serde2.avro.AvroSerDe");
 ```
+
+If you use Confluent's Avro serializer or deserializer with the Confluent Schema Registry, you will need to remove five bytes from the beginning of each message. These five bytes represent [a magic byte and a four-byte schema ID from the registry](https://docs.confluent.io/current/schema-registry/serializer-formatter.html#wire-format).
+This can be done by setting `"avro.serde.type"="skip"` and `"avro.serde.skip.bytes"="5"`. In that case it is also recommended to set the Avro schema, either via `"avro.schema.url"="http://hostname/SimpleDocument.avsc"` or `"avro.schema.literal"="{\"type\": \"record\", \"name\": \"SimpleRecord\", ...}"`. If both properties are set, `avro.schema.literal` takes priority.
 
 List of supported serializers and deserializers:
diff --git kafka-handler/pom.xml kafka-handler/pom.xml
index 6ad41de..599b5e7 100644
--- kafka-handler/pom.xml
+++ kafka-handler/pom.xml
@@ -80,6 +80,10 @@
       <artifactId>kafka-clients</artifactId>
       <version>${kafka.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro</artifactId>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
@@ -118,8 +122,27 @@
       <version>1.7.30</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>io.confluent</groupId>
+      <artifactId>kafka-avro-serializer</artifactId>
+      <version>5.4.0</version>
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
   </dependencies>
+
+  <repositories>
+    <repository>
+      <id>confluent</id>
+      <url>http://packages.confluent.io/maven/</url>
+    </repository>
+  </repositories>
 
   <profiles>
     <profile>
       <id>dev-fast-build</id>
@@ -179,6 +202,10 @@
     <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
     <plugins>
+      <plugin>
+        <groupId>org.apache.avro</groupId>
+        <artifactId>avro-maven-plugin</artifactId>
+      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-jar-plugin</artifactId>
@@ -190,5 +217,27 @@
       </plugin>
     </plugins>
+    <pluginManagement>
+      <plugins>
+        <plugin>
+          <groupId>org.apache.avro</groupId>
+          <artifactId>avro-maven-plugin</artifactId>
+          <version>${avro.version}</version>
+          <executions>
+            <execution>
+              <phase>generate-sources</phase>
+              <goals>
+                <goal>schema</goal>
+              </goals>
+              <configuration>
+                <sourceDirectory>${project.basedir}/src/resources/</sourceDirectory>
+                <enableDecimalLogicalType>true</enableDecimalLogicalType>
+                <stringType>String</stringType>
+              </configuration>
+            </execution>
+          </executions>
+        </plugin>
+      </plugins>
+    </pluginManagement>
   </build>
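To make the five-byte figure from the README change concrete, here is a standalone sketch (not part of the patch; the class name, the inline schema, and the schema ID of `1` are all illustrative) that frames a record the way Confluent's serializer does, then decodes it with a five-byte offset:

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;

public class WireFormatDemo {
  public static void main(String[] args) throws IOException {
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"SimpleRecord\","
        + "\"fields\":[{\"name\":\"id\",\"type\":\"string\"},"
        + "{\"name\":\"name\",\"type\":\"string\"}]}");

    GenericRecord record = new GenericData.Record(schema);
    record.put("id", "123");
    record.put("name", "test");

    // The bare Avro body: what a plain Avro decoder expects to see.
    ByteArrayOutputStream body = new ByteArrayOutputStream();
    BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(body, null);
    new GenericDatumWriter<GenericRecord>(schema).write(record, encoder);
    encoder.flush();
    byte[] avroBytes = body.toByteArray();

    // The Confluent frame: 1 magic byte (0x0) + 4-byte big-endian schema ID + body.
    ByteBuffer framed = ByteBuffer.allocate(5 + avroBytes.length);
    framed.put((byte) 0x0).putInt(1).put(avroBytes);

    // Skipping the first five bytes recovers a decodable Avro payload, which is
    // exactly what "avro.serde.type"="skip" with "avro.serde.skip.bytes"="5" does.
    GenericRecord decoded = new GenericDatumReader<GenericRecord>(schema)
        .read(null, DecoderFactory.get().binaryDecoder(framed.array(), 5, avroBytes.length, null));
    System.out.println(decoded); // {"id": "123", "name": "test"}
  }
}
```

The offset decode at the end mirrors what the new `AvroSkipBytesConverter` below does via `DecoderFactory.get().binaryDecoder(value, skipBytes, value.length - skipBytes, null)`.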
diff --git kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaSerDe.java kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaSerDe.java
index ffe7788..97d81e8 100644
--- kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaSerDe.java
+++ kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaSerDe.java
@@ -25,6 +25,7 @@
 import org.apache.avro.generic.GenericRecord;
 import org.apache.avro.io.BinaryEncoder;
 import org.apache.avro.io.DatumReader;
+import org.apache.avro.io.Decoder;
 import org.apache.avro.io.DecoderFactory;
 import org.apache.avro.io.EncoderFactory;
 import org.apache.avro.specific.SpecificDatumReader;
@@ -133,12 +134,44 @@
       Preconditions.checkArgument(!schemaFromProperty.isEmpty(), "Avro Schema is empty Can not go further");
       Schema schema = AvroSerdeUtils.getSchemaFor(schemaFromProperty);
       LOG.debug("Building Avro Reader with schema {}", schemaFromProperty);
-      bytesConverter = new AvroBytesConverter(schema);
+      bytesConverter = getByteConverterForAvroDelegate(schema, tbl);
     } else {
       bytesConverter = new BytesWritableConverter();
     }
   }
 
+  enum BytesConverterType {
+    SKIP,
+    NONE;
+
+    static BytesConverterType fromString(String value) {
+      try {
+        return BytesConverterType.valueOf(value.trim().toUpperCase());
+      } catch (Exception e) {
+        return NONE;
+      }
+    }
+  }
+
+  BytesConverter getByteConverterForAvroDelegate(Schema schema, Properties tbl) throws SerDeException {
+    String avroBytesConverterPropertyName = AvroSerdeUtils.AvroTableProperties.AVRO_SERDE_TYPE.getPropName();
+    String avroBytesConverterProperty = tbl.getProperty(avroBytesConverterPropertyName,
+        BytesConverterType.NONE.toString());
+    BytesConverterType avroByteConverterType = BytesConverterType.fromString(avroBytesConverterProperty);
+    String avroSkipBytesPropertyName = AvroSerdeUtils.AvroTableProperties.AVRO_SERDE_SKIP_BYTES.getPropName();
+    int avroSkipBytes = 0;
+    try {
+      avroSkipBytes = Integer.parseInt(tbl.getProperty(avroSkipBytesPropertyName, "0"));
+    } catch (NumberFormatException e) {
+      throw new SerDeException("Value of " + avroSkipBytesPropertyName + " could not be parsed into an integer properly.", e);
+    }
+    switch (avroByteConverterType) {
+      case SKIP: return new AvroSkipBytesConverter(schema, avroSkipBytes);
+      case NONE: return new AvroBytesConverter(schema);
+      default: throw new SerDeException("Value of " + avroBytesConverterPropertyName + " was invalid.");
+    }
+  }
+
   @Override public Class<? extends Writable> getSerializedClass() {
     return delegateSerDe.getSerializedClass();
   }
@@ -327,7 +360,7 @@ private SubStructObjectInspector(StructObjectInspector baseOI, int toIndex) {
     K getWritable(byte[] value);
   }
 
-  private static class AvroBytesConverter implements BytesConverter<AvroGenericRecordWritable> {
+  static class AvroBytesConverter implements BytesConverter<AvroGenericRecordWritable> {
     private final Schema schema;
     private final DatumReader<GenericRecord> dataReader;
     private final GenericDatumWriter<GenericRecord> gdw = new GenericDatumWriter<>();
@@ -354,12 +387,18 @@ private SubStructObjectInspector(StructObjectInspector baseOI, int toIndex) {
       return valueBytes;
     }
 
+    Decoder getDecoder(byte[] value) throws SerDeException {
+      return DecoderFactory.get().binaryDecoder(value, null);
+    }
+
     @Override public AvroGenericRecordWritable getWritable(byte[] value) {
       GenericRecord avroRecord = null;
       try {
-        avroRecord = dataReader.read(null, DecoderFactory.get().binaryDecoder(value, null));
+        avroRecord = dataReader.read(null, getDecoder(value));
       } catch (IOException e) {
         Throwables.propagate(new SerDeException(e));
+      } catch (SerDeException e) {
+        Throwables.propagate(e);
       }
 
       avroGenericRecordWritable.setRecord(avroRecord);
@@ -369,6 +408,30 @@ private SubStructObjectInspector(StructObjectInspector baseOI, int toIndex) {
     }
   }
 
+  /**
+   * Avro converter which skips the first {@code skipBytes} of each message.
+   *
+   * This may be needed for various serializers, such as the Confluent Avro serializer, which uses the first five
+   * bytes to indicate a magic byte, as well as a four-byte schema ID.
+   */
+  static class AvroSkipBytesConverter extends AvroBytesConverter {
+    private final int skipBytes;
+
+    AvroSkipBytesConverter(Schema schema, int skipBytes) {
+      super(schema);
+      this.skipBytes = skipBytes;
+    }
+
+    @Override
+    Decoder getDecoder(byte[] value) throws SerDeException {
+      try {
+        return DecoderFactory.get().binaryDecoder(value, this.skipBytes, value.length - this.skipBytes, null);
+      } catch (ArrayIndexOutOfBoundsException e) {
+        throw new SerDeException("Skip bytes value is larger than the message length.", e);
+      }
+    }
+  }
+
   private static class BytesWritableConverter implements BytesConverter<BytesWritable> {
     @Override public byte[] getBytes(BytesWritable writable) {
       return writable.getBytes();
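For clarity, this is how the new table properties drive converter selection. A hypothetical caller (it would need to live in the same package, since the converter types are package-private; `schema` is any parsed Avro schema):

```java
import java.util.Properties;
import org.apache.avro.Schema;
import org.apache.hadoop.hive.serde2.SerDeException;
import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;

class ConverterSelectionSketch {
  static KafkaSerDe.BytesConverter pick(Schema schema) throws SerDeException {
    Properties tbl = new Properties();
    tbl.setProperty(AvroSerdeUtils.AvroTableProperties.AVRO_SERDE_TYPE.getPropName(), "skip");
    tbl.setProperty(AvroSerdeUtils.AvroTableProperties.AVRO_SERDE_SKIP_BYTES.getPropName(), "5");

    // Returns an AvroSkipBytesConverter(schema, 5). An absent or unrecognized
    // "avro.serde.type" parses to NONE and keeps the plain AvroBytesConverter
    // behavior; a non-numeric "avro.serde.skip.bytes" raises a SerDeException.
    return new KafkaSerDe().getByteConverterForAvroDelegate(schema, tbl);
  }
}
```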
diff --git kafka-handler/src/resources/SimpleRecord.avsc kafka-handler/src/resources/SimpleRecord.avsc
new file mode 100644
index 0000000..47b6156
--- /dev/null
+++ kafka-handler/src/resources/SimpleRecord.avsc
@@ -0,0 +1,13 @@
+{
+  "type" : "record",
+  "name" : "SimpleRecord",
+  "namespace" : "org.apache.hadoop.hive.kafka",
+  "fields" : [ {
+    "name" : "id",
+    "type" : "string"
+  }, {
+    "name" : "name",
+    "type" : "string"
+  }
+  ]
+}
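The avro-maven-plugin wired into the pom above compiles this schema into a `SimpleRecord` SpecificRecord class (by default under `target/generated-sources/avro`), which is what the test below constructs. A sketch of that generated builder API, with the standard Avro binary encoding of each field noted (each string is a zigzag-varint length followed by its UTF-8 bytes):

```java
// Hypothetical usage of the class generated from SimpleRecord.avsc:
SimpleRecord record = SimpleRecord.newBuilder()
    .setId("123")     // encodes as 0x06 '1' '2' '3' (varint length 3 + UTF-8)
    .setName("test")  // encodes as 0x08 't' 'e' 's' 't' (varint length 4 + UTF-8)
    .build();         // 9 bytes total in Avro binary form
```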
diff --git kafka-handler/src/test/org/apache/hadoop/hive/kafka/AvroBytesConverterTest.java kafka-handler/src/test/org/apache/hadoop/hive/kafka/AvroBytesConverterTest.java
new file mode 100644
index 0000000..9e1947e
--- /dev/null
+++ kafka-handler/src/test/org/apache/hadoop/hive/kafka/AvroBytesConverterTest.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.kafka;
+
+import com.google.common.collect.Maps;
+import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
+import io.confluent.kafka.serializers.AbstractKafkaAvroSerDeConfig;
+import io.confluent.kafka.serializers.KafkaAvroSerializer;
+import org.apache.avro.Schema;
+import org.apache.hadoop.hive.serde2.avro.AvroGenericRecordWritable;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Test class for the Hive Kafka Avro SerDe with a variable number of bytes skipped.
+ */
+public class AvroBytesConverterTest {
+  private static SimpleRecord simpleRecord = SimpleRecord.newBuilder().setId("123").setName("test").build();
+  private static byte[] simpleRecordConfluentBytes;
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  /**
+   * Uses the KafkaAvroSerializer from Confluent to serialize the simpleRecord.
+   */
+  @BeforeClass
+  public static void setUp() {
+    Map<String, Object> config = Maps.newHashMap();
+    config.put(AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "http://localhost:8081");
+    KafkaAvroSerializer avroSerializer = new KafkaAvroSerializer(new MockSchemaRegistryClient());
+    avroSerializer.configure(config, false);
+    simpleRecordConfluentBytes = avroSerializer.serialize("temp", simpleRecord);
+  }
+
+  private void runConversionTest(KafkaSerDe.AvroBytesConverter conv, byte[] serializedSimpleRecord) {
+    AvroGenericRecordWritable simpleRecordWritable = conv.getWritable(serializedSimpleRecord);
+
+    Assert.assertNotNull(simpleRecordWritable);
+    Assert.assertEquals(SimpleRecord.class, simpleRecordWritable.getRecord().getClass());
+
+    SimpleRecord simpleRecordDeserialized = (SimpleRecord) simpleRecordWritable.getRecord();
+
+    Assert.assertNotNull(simpleRecordDeserialized);
+    Assert.assertEquals(simpleRecord, simpleRecordDeserialized);
+  }
+
+  /**
+   * Tests that the default case of no skipped bytes per record works properly.
+   */
+  @Test
+  public void convertWithAvroBytesConverter() {
+    // Since the serialized version was created by Confluent,
+    // remove the first five bytes to get the bare Avro message.
+    int recordLength = simpleRecordConfluentBytes.length;
+    byte[] simpleRecordWithNoOffset = Arrays.copyOfRange(simpleRecordConfluentBytes, 5, recordLength);
+
+    Schema schema = SimpleRecord.getClassSchema();
+    KafkaSerDe.AvroBytesConverter conv = new KafkaSerDe.AvroBytesConverter(schema);
+    runConversionTest(conv, simpleRecordWithNoOffset);
+  }
+
+  /**
+   * Tests that the skip converter skips five bytes properly, which matches what Confluent prepends.
+   */
+  @Test
+  public void convertWithConfluentAvroBytesConverter() {
+    Schema schema = SimpleRecord.getClassSchema();
+    KafkaSerDe.AvroSkipBytesConverter conv = new KafkaSerDe.AvroSkipBytesConverter(schema, 5);
+    runConversionTest(conv, simpleRecordConfluentBytes);
+  }
+
+  /**
+   * Tests that the skip converter skips a custom number of bytes properly.
+   */
+  @Test
+  public void convertWithCustomAvroSkipBytesConverter() {
+    int offset = 2;
+    // Remove all but two bytes of the five-byte header which Confluent adds,
+    // to simulate a message with only two bytes in front of the payload.
+    int recordLength = simpleRecordConfluentBytes.length;
+    byte[] simpleRecordAsOffsetBytes = Arrays.copyOfRange(simpleRecordConfluentBytes, 5 - offset, recordLength);
+
+    Schema schema = SimpleRecord.getClassSchema();
+    KafkaSerDe.AvroSkipBytesConverter conv = new KafkaSerDe.AvroSkipBytesConverter(schema, offset);
+    runConversionTest(conv, simpleRecordAsOffsetBytes);
+  }
+
+  /**
+   * Tests that when we skip more bytes than are in the message, we throw an exception properly.
+   */
+  @Test
+  public void skipBytesLargerThanMessageSizeConverter() {
+    // The serialized record is two short strings (9 bytes of Avro data) plus the
+    // 5-byte Confluent header, so an offset of 190 is far larger than the message.
+    int offset = 190;
+
+    Schema schema = SimpleRecord.getClassSchema();
+    KafkaSerDe.AvroSkipBytesConverter conv = new KafkaSerDe.AvroSkipBytesConverter(schema, offset);
+
+    exception.expect(RuntimeException.class);
+    exception.expectMessage("org.apache.hadoop.hive.serde2.SerDeException: "
+        + "Skip bytes value is larger than the message length.");
+    runConversionTest(conv, simpleRecordConfluentBytes);
+  }
+
+  /**
+   * Tests that we properly parse the converter type, no matter the casing.
+   */
+  @Test
+  public void bytesConverterTypeParseTest() {
+    Map<String, KafkaSerDe.BytesConverterType> testCases = new HashMap<String, KafkaSerDe.BytesConverterType>() {{
+      put("skip", KafkaSerDe.BytesConverterType.SKIP);
+      put("sKIp", KafkaSerDe.BytesConverterType.SKIP);
+      put("SKIP", KafkaSerDe.BytesConverterType.SKIP);
+      put(" skip ", KafkaSerDe.BytesConverterType.SKIP);
+      put("SKIP1", KafkaSerDe.BytesConverterType.NONE);
+      put("skipper", KafkaSerDe.BytesConverterType.NONE);
+      put("", KafkaSerDe.BytesConverterType.NONE);
+      put(null, KafkaSerDe.BytesConverterType.NONE);
+      put("none", KafkaSerDe.BytesConverterType.NONE);
+      put("NONE", KafkaSerDe.BytesConverterType.NONE);
+      put(" none ", KafkaSerDe.BytesConverterType.NONE);
+    }};
+
+    for (Map.Entry<String, KafkaSerDe.BytesConverterType> entry : testCases.entrySet()) {
+      Assert.assertEquals(entry.getValue(), KafkaSerDe.BytesConverterType.fromString(entry.getKey()));
+    }
+  }
+}
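One subtlety in the error-path test above: Guava's `Throwables.propagate` rethrows the checked `SerDeException` wrapped in a `RuntimeException`, and `RuntimeException(Throwable)` uses the cause's `toString()` as its message, which is why the test expects `RuntimeException` and matches the fully qualified `SerDeException` message as a substring. In miniature (a hypothetical fragment):

```java
import org.apache.hadoop.hive.serde2.SerDeException;

// What the test observes when the skip offset exceeds the message length:
RuntimeException wrapped = new RuntimeException(
    new SerDeException("Skip bytes value is larger than the message length."));
// Prints: java.lang.RuntimeException: org.apache.hadoop.hive.serde2.SerDeException:
//         Skip bytes value is larger than the message length.
System.out.println(wrapped);
```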
diff --git serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java
index d16abdb..b540073 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java
@@ -68,6 +68,8 @@
     SCHEMA_NAME("avro.schema.name"),
     SCHEMA_DOC("avro.schema.doc"),
     AVRO_SERDE_SCHEMA("avro.serde.schema"),
+    AVRO_SERDE_TYPE("avro.serde.type"),
+    AVRO_SERDE_SKIP_BYTES("avro.serde.skip.bytes"),
     SCHEMA_RETRIEVER("avro.schema.retriever");
 
     private final String propName;