diff --git itests/src/test/resources/testconfiguration.properties itests/src/test/resources/testconfiguration.properties
index 53da72b..79772cf 100644
--- itests/src/test/resources/testconfiguration.properties
+++ itests/src/test/resources/testconfiguration.properties
@@ -696,6 +696,7 @@ minillaplocal.query.files=\
   vector_join30.q,\
   vector_join_filters.q,\
   vector_leftsemi_mapjoin.q,\
+  vector_llap_text_1.q,\
   vector_mapjoin_reduce.q,\
   vector_number_compare_projection.q,\
   vector_partitioned_date_time.q,\
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
index cd12a0b..6f1346d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorMapOperator.java
@@ -620,6 +620,25 @@ private void setRowIdentiferToNull(VectorizedRowBatch batch) {
   }
 
   /*
+   * Flush a partially full deserializerBatch.
+   * @return True if the operator tree is not done yet.
+   */
+  private boolean flushDeserializerBatch() throws HiveException {
+    if (deserializerBatch.size > 0) {
+
+      batchCounter++;
+      oneRootOperator.process(deserializerBatch, 0);
+      deserializerBatch.reset();
+      if (oneRootOperator.getDone()) {
+        setDone(true);
+        return false;
+      }
+
+    }
+    return true;
+  }
+
+  /*
    * Setup the context for reading from the next partition file.
    */
   private void setupPartitionContextVars(String nominalPath) throws HiveException {
@@ -672,20 +691,14 @@ private void setupPartitionContextVars(String nominalPath) throws HiveException
         currentReadType == VectorMapOperatorReadType.VECTOR_DESERIALIZE ||
         currentReadType == VectorMapOperatorReadType.ROW_DESERIALIZE);
 
-    if (deserializerBatch.size > 0) {
-
-      /*
-       * Clear out any rows in the batch from previous partition since we are going to change
-       * the repeating partition column values.
-       */
-      batchCounter++;
-      oneRootOperator.process(deserializerBatch, 0);
-      deserializerBatch.reset();
-      if (oneRootOperator.getDone()) {
-        setDone(true);
-        return;
-      }
+    /*
+     * Clear out any rows in the batch from previous partition since we are going to change
+     * the repeating partition column values.
+     */
+    if (!flushDeserializerBatch()) {
+      // Operator tree is now done.
+      return;
     }
 
     /*
@@ -773,6 +786,38 @@ public Deserializer getCurrentDeserializer() {
     return null;
   }
 
+  /*
+   * Deliver a vector batch to the operator tree.
+   *
+   * The Vectorized Input File Format reader has already set the partition column
+   * values, reset and filled in the batch, etc.
+   *
+   * We pass the VectorizedRowBatch through here.
+   *
+   * @return True if the operator tree is not done yet.
+   */
+  private boolean deliverVectorizedRowBatch(Writable value) throws HiveException {
+
+    batchCounter++;
+    if (value != null) {
+      VectorizedRowBatch batch = (VectorizedRowBatch) value;
+      numRows += batch.size;
+      if (hasRowIdentifier) {
+        if (batchContext.getRecordIdColumnVector() == null) {
+          setRowIdentiferToNull(batch);
+        } else {
+          batch.cols[rowIdentifierColumnNum] = batchContext.getRecordIdColumnVector();
+        }
+      }
+    }
+    oneRootOperator.process(value, 0);
+    if (oneRootOperator.getDone()) {
+      setDone(true);
+      return false;
+    }
+    return true;
+  }
+
   @Override
   public void process(Writable value) throws HiveException {
 
@@ -798,30 +843,33 @@ public void process(Writable value) throws HiveException {
     try {
       if (currentReadType == VectorMapOperatorReadType.VECTORIZED_INPUT_FILE_FORMAT) {
 
+        if (!deliverVectorizedRowBatch(value)) {
+
+          // Operator tree is now done.
+          return;
+        }
+
+      } else if (value instanceof VectorizedRowBatch) {
+
         /*
-         * The Vectorized Input File Format reader has already set the partition column
-         * values, reset and filled in the batch, etc.
-         *
-         * We pass the VectorizedRowBatch through here.
+         * This case can happen with LLAP. If it is able to deserialize and cache data from the
+         * input format, it will deliver that cached data to us as VRBs.
          */
-        batchCounter++;
-        if (value != null) {
-          VectorizedRowBatch batch = (VectorizedRowBatch) value;
-          numRows += batch.size;
-          if (hasRowIdentifier) {
-            if (batchContext.getRecordIdColumnVector() == null) {
-              setRowIdentiferToNull(batch);
-            } else {
-              batch.cols[rowIdentifierColumnNum] = batchContext.getRecordIdColumnVector();
-            }
-          }
-        }
-        oneRootOperator.process(value, 0);
-        if (oneRootOperator.getDone()) {
-          setDone(true);
+
+        /*
+         * Clear out any rows we may have processed in row-mode for the current partition.
+         */
+        if (!flushDeserializerBatch()) {
+
+          // Operator tree is now done.
           return;
         }
+        if (!deliverVectorizedRowBatch(value)) {
+
+          // Operator tree is now done.
+          return;
+        }
 
       } else {
 
         /*
diff --git ql/src/test/queries/clientpositive/vector_llap_text_1.q ql/src/test/queries/clientpositive/vector_llap_text_1.q
new file mode 100644
index 0000000..ee660f8
--- /dev/null
+++ ql/src/test/queries/clientpositive/vector_llap_text_1.q
@@ -0,0 +1,41 @@
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.vectorized.execution.enabled=true;
+set hive.llap.io.enabled=true;
+set hive.map.aggr=false;
+set hive.strict.checks.bucketing=false;
+set hive.mapred.mode=nonstrict;
+set hive.explain.user=false;
+
+set hive.auto.convert.join=true;
+set hive.auto.convert.join.noconditionaltask=true;
+set hive.auto.convert.join.noconditionaltask.size=10000;
+
+CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE;
+
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08');
+
+load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08');
+
+set hive.optimize.bucketingsorting=false;
+insert overwrite table tab_part partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin_part;
+
+CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE;
+insert overwrite table tab partition (ds='2008-04-08')
+select key,value from srcbucket_mapjoin;
+
+set hive.convert.join.bucket.mapjoin.tez = true;
+explain vectorization detail
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key;
+select a.key, a.value, b.value
+from tab a join tab_part b on a.key = b.key;
+
+
+
diff --git ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
new file mode 100644
index 0000000..05fc88c
--- /dev/null
+++ ql/src/test/results/clientpositive/llap/vector_llap_text_1.q.out
@@ -0,0 +1,771 @@
+PREHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@srcbucket_mapjoin
+POSTHOOK: query: CREATE TABLE srcbucket_mapjoin(key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@srcbucket_mapjoin
+PREHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@tab_part
+POSTHOOK: query: CREATE TABLE tab_part (key int, value string) PARTITIONED BY(ds STRING)
CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab_part +PREHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: CREATE TABLE srcbucket_mapjoin_part (key int, value string) partitioned by (ds string) CLUSTERED BY (key) INTO 4 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@srcbucket_mapjoin_part +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj1/000001_0' INTO TABLE srcbucket_mapjoin partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: query: load data local inpath '../../data/files/bmj/000000_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000001_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: load data local inpath '../../data/files/bmj/000002_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: query: 
load data local inpath '../../data/files/bmj/000003_0' INTO TABLE srcbucket_mapjoin_part partition(ds='2008-04-08') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin_part +PREHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +PREHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab_part partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin_part +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin_part +POSTHOOK: Input: default@srcbucket_mapjoin_part@ds=2008-04-08 +POSTHOOK: Output: default@tab_part@ds=2008-04-08 +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab_part PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin_part)srcbucket_mapjoin_part.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tab +POSTHOOK: query: CREATE TABLE tab(key int, value string) PARTITIONED BY(ds STRING) CLUSTERED BY (key) INTO 2 BUCKETS STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tab +PREHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +PREHOOK: type: QUERY +PREHOOK: Input: default@srcbucket_mapjoin +PREHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +PREHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: query: insert overwrite table tab partition (ds='2008-04-08') +select key,value from srcbucket_mapjoin +POSTHOOK: type: QUERY +POSTHOOK: Input: default@srcbucket_mapjoin +POSTHOOK: Input: default@srcbucket_mapjoin@ds=2008-04-08 +POSTHOOK: Output: default@tab@ds=2008-04-08 +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).key SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: tab PARTITION(ds=2008-04-08).value SIMPLE [(srcbucket_mapjoin)srcbucket_mapjoin.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: explain vectorization detail +select a.key, a.value, b.value +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +POSTHOOK: query: explain vectorization detail +select a.key, a.value, b.value +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +PLAN VECTORIZATION: + enabled: true + enabledConditionsMet: [hive.vectorized.execution.enabled IS true] + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Map 2 <- Map 1 (CUSTOM_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE + TableScan Vectorization: + native: true + vectorizationSchemaColumns: [0:key:int, 1:value:string, 2:ds:string, 3:ROW__ID:struct] + Filter Operator + Filter Vectorization: + className: VectorFilterOperator + native: true + predicateExpression: 
SelectColumnIsNotNull(col 0:int) + predicate: key is not null (type: boolean) + Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Select Vectorization: + className: VectorSelectOperator + native: true + projectedOutputColumnNums: [0, 1] + Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Reduce Sink Vectorization: + className: VectorReduceSinkObjectHashOperator + keyColumnNums: [0] + native: true + nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true + partitionColumnNums: [0] + valueColumnNums: [1] + Statistics: Num rows: 242 Data size: 22990 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string) + Execution mode: vectorized, llap + LLAP IO: no inputs + Map Vectorization: + enabled: true + enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true + inputFormatFeatureSupport: [DECIMAL_64] + vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled] + featureSupportInUse: [] + inputFileFormats: org.apache.hadoop.mapred.TextInputFormat + allNative: true + usesVectorUDFAdaptor: false + vectorized: true + rowBatchContext: + dataColumnCount: 2 + includeColumns: [0, 1] + dataColumns: key:int, value:string + partitionColumnCount: 1 + partitionColumns: ds:string + scratchColumnTypeNames: [] + Map 2 + Map Operator Tree: + TableScan + alias: b + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + TableScan Vectorization: + native: true + vectorizationSchemaColumns: [0:key:int, 1:value:string, 2:ds:string, 3:ROW__ID:struct] + Filter Operator + Filter Vectorization: + className: VectorFilterOperator + native: true + predicateExpression: SelectColumnIsNotNull(col 0:int) + predicate: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Select Vectorization: + className: VectorSelectOperator + native: true + projectedOutputColumnNums: [0, 1] + Statistics: Num rows: 500 Data size: 47500 Basic stats: COMPLETE Column stats: COMPLETE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + Map Join Vectorization: + bigTableKeyColumnNums: [0] + bigTableRetainedColumnNums: [0, 1] + bigTableValueColumnNums: [1] + className: VectorMapJoinInnerLongOperator + native: true + nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez, spark] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true + projectedOutputColumnNums: [0, 4, 1] + smallTableMapping: [4] + outputColumnNames: _col0, _col1, _col3 + input vertices: + 0 Map 1 + Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), _col3 (type: 
string) + outputColumnNames: _col0, _col1, _col2 + Select Vectorization: + className: VectorSelectOperator + native: true + projectedOutputColumnNums: [0, 4, 1] + Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + File Sink Vectorization: + className: VectorFileSinkOperator + native: false + Statistics: Num rows: 391 Data size: 72726 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized, llap + LLAP IO: no inputs + Map Vectorization: + enabled: true + enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true + inputFormatFeatureSupport: [DECIMAL_64] + vectorizationSupportRemovedReasons: [DECIMAL_64 disabled because LLAP is enabled] + featureSupportInUse: [] + inputFileFormats: org.apache.hadoop.mapred.TextInputFormat + allNative: false + usesVectorUDFAdaptor: false + vectorized: true + rowBatchContext: + dataColumnCount: 2 + includeColumns: [0, 1] + dataColumns: key:int, value:string + partitionColumnCount: 1 + partitionColumns: ds:string + scratchColumnTypeNames: [string] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select a.key, a.value, b.value +from tab a join tab_part b on a.key = b.key +PREHOOK: type: QUERY +PREHOOK: Input: default@tab +PREHOOK: Input: default@tab@ds=2008-04-08 +PREHOOK: Input: default@tab_part +PREHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +POSTHOOK: query: select a.key, a.value, b.value +from tab a join tab_part b on a.key = b.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tab +POSTHOOK: Input: default@tab@ds=2008-04-08 +POSTHOOK: Input: default@tab_part +POSTHOOK: Input: default@tab_part@ds=2008-04-08 +#### A masked pattern was here #### +400 val_400 val_400 +448 val_448 val_448 +136 val_136 val_136 +444 val_444 val_444 +44 val_44 val_44 +80 val_80 val_80 +228 val_228 val_228 +392 val_392 val_392 +8 val_8 val_8 +260 val_260 val_260 +356 val_356 val_356 +4 val_4 val_4 +484 val_484 val_484 +28 val_28 val_28 +460 val_460 val_460 +248 val_248 val_248 +244 val_244 val_244 +116 val_116 val_116 +64 val_64 val_64 +336 val_336 val_336 +196 val_196 val_196 +156 val_156 val_156 +284 val_284 val_284 +332 val_332 val_332 +192 val_192 val_192 +20 val_20 val_20 +84 val_84 val_84 +84 val_84 val_84 +172 val_172 val_172 +172 val_172 val_172 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +480 val_480 val_480 +480 val_480 val_480 +480 val_480 val_480 +396 val_396 val_396 +396 val_396 val_396 +396 val_396 val_396 +224 val_224 val_224 +224 val_224 val_224 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +404 val_404 val_404 +404 val_404 val_404 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +172 val_172 val_172 +172 val_172 val_172 +84 val_84 val_84 +84 val_84 val_84 +404 val_404 val_404 +404 val_404 val_404 +176 val_176 val_176 +176 val_176 val_176 +176 val_176 val_176 +176 val_176 val_176 +316 val_316 val_316 +316 val_316 val_316 +316 val_316 val_316 +0 val_0 val_0 +0 val_0 val_0 +0 val_0 val_0 +396 val_396 
val_396 +396 val_396 val_396 +396 val_396 val_396 +224 val_224 val_224 +224 val_224 val_224 +200 val_200 val_200 +200 val_200 val_200 +152 val_152 val_152 +152 val_152 val_152 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +200 val_200 val_200 +200 val_200 val_200 +24 val_24 val_24 +24 val_24 val_24 +424 val_424 val_424 +424 val_424 val_424 +424 val_424 val_424 +424 val_424 val_424 +280 val_280 val_280 +280 val_280 val_280 +288 val_288 val_288 +288 val_288 val_288 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +288 val_288 val_288 +288 val_288 val_288 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +468 val_468 val_468 +24 val_24 val_24 +24 val_24 val_24 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +280 val_280 val_280 +280 val_280 val_280 +208 val_208 val_208 +208 val_208 val_208 +208 val_208 val_208 +152 val_152 val_152 +152 val_152 val_152 +477 val_477 val_477 +341 val_341 val_341 +305 val_305 val_305 +257 val_257 val_257 +33 val_33 val_33 +389 val_389 val_389 +77 val_77 val_77 +437 val_437 val_437 +345 val_345 val_345 +189 val_189 val_189 +493 val_493 val_493 +105 val_105 val_105 +53 val_53 val_53 +453 val_453 val_453 +497 val_497 val_497 +457 val_457 val_457 +181 val_181 val_181 +57 val_57 val_57 +17 val_17 val_17 +365 val_365 val_365 +145 val_145 val_145 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +37 val_37 val_37 +37 val_37 val_37 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +125 val_125 val_125 +125 val_125 val_125 +217 val_217 val_217 +217 val_217 val_217 +309 val_309 val_309 +309 val_309 val_309 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +165 val_165 val_165 +165 val_165 val_165 +129 val_129 val_129 +129 val_129 val_129 +217 val_217 val_217 +217 val_217 val_217 +213 val_213 val_213 +213 val_213 val_213 +125 val_125 val_125 +125 val_125 val_125 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +169 val_169 val_169 +129 val_129 val_129 +129 val_129 val_129 +309 val_309 val_309 +309 val_309 val_309 +37 val_37 val_37 +37 val_37 val_37 +213 val_213 val_213 +213 val_213 val_213 +165 val_165 val_165 +165 val_165 val_165 +97 val_97 val_97 +97 val_97 val_97 +97 val_97 val_97 +97 val_97 val_97 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +233 val_233 val_233 +233 val_233 val_233 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +325 val_325 val_325 +325 val_325 val_325 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +237 val_237 val_237 +237 val_237 val_237 +321 val_321 val_321 +321 val_321 val_321 +149 val_149 val_149 +149 val_149 val_149 +321 val_321 val_321 +321 val_321 val_321 +233 val_233 val_233 +233 val_233 val_233 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +413 val_413 val_413 +413 val_413 val_413 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +149 val_149 val_149 +149 val_149 val_149 +325 val_325 val_325 +325 val_325 val_325 +417 val_417 val_417 +417 val_417 val_417 +417 
val_417 val_417 +413 val_413 val_413 +413 val_413 val_413 +237 val_237 val_237 +237 val_237 val_237 +417 val_417 val_417 +417 val_417 val_417 +417 val_417 val_417 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +277 val_277 val_277 +369 val_369 val_369 +369 val_369 val_369 +369 val_369 val_369 +273 val_273 val_273 +273 val_273 val_273 +273 val_273 val_273 +194 val_194 val_194 +114 val_114 val_114 +190 val_190 val_190 +202 val_202 val_202 +158 val_158 val_158 +286 val_286 val_286 +378 val_378 val_378 +338 val_338 val_338 +374 val_374 val_374 +66 val_66 val_66 +150 val_150 val_150 +222 val_222 val_222 +310 val_310 val_310 +178 val_178 val_178 +262 val_262 val_262 +2 val_2 val_2 +402 val_402 val_402 +226 val_226 val_226 +170 val_170 val_170 +266 val_266 val_266 +482 val_482 val_482 +394 val_394 val_394 +446 val_446 val_446 +82 val_82 val_82 +86 val_86 val_86 +462 val_462 val_462 +462 val_462 val_462 +26 val_26 val_26 +26 val_26 val_26 +462 val_462 val_462 +462 val_462 val_462 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +242 val_242 val_242 +242 val_242 val_242 +242 val_242 val_242 +242 val_242 val_242 +118 val_118 val_118 +118 val_118 val_118 +282 val_282 val_282 +282 val_282 val_282 +118 val_118 val_118 +118 val_118 val_118 +282 val_282 val_282 +282 val_282 val_282 +26 val_26 val_26 +26 val_26 val_26 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +466 val_466 val_466 +134 val_134 val_134 +134 val_134 val_134 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +42 val_42 val_42 +42 val_42 val_42 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +134 val_134 val_134 +134 val_134 val_134 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +42 val_42 val_42 +42 val_42 val_42 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +318 val_318 val_318 +318 val_318 val_318 +318 val_318 val_318 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +138 val_138 val_138 +174 val_174 val_174 +174 val_174 val_174 +174 val_174 val_174 +174 val_174 val_174 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +406 val_406 val_406 +183 val_183 val_183 +323 val_323 val_323 +491 val_491 val_491 +235 val_235 val_235 +143 val_143 val_143 +11 val_11 val_11 +19 val_19 val_19 +419 val_419 val_419 +411 val_411 val_411 +275 val_275 val_275 +455 val_455 val_455 +495 val_495 val_495 +479 val_479 val_479 +163 val_163 val_163 +291 val_291 val_291 +435 val_435 val_435 +475 val_475 val_475 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +239 val_239 val_239 +239 val_239 val_239 +367 val_367 val_367 +367 val_367 val_367 +103 val_103 val_103 +103 val_103 val_103 +95 val_95 val_95 +95 val_95 val_95 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +15 val_15 val_15 +15 val_15 val_15 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +95 val_95 val_95 +95 val_95 val_95 +51 val_51 val_51 +51 val_51 val_51 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +239 val_239 val_239 +239 val_239 val_239 +103 val_103 val_103 +103 val_103 val_103 +51 val_51 
val_51 +51 val_51 val_51 +459 val_459 val_459 +459 val_459 val_459 +187 val_187 val_187 +187 val_187 val_187 +187 val_187 val_187 +367 val_367 val_367 +367 val_367 val_367 +459 val_459 val_459 +459 val_459 val_459 +15 val_15 val_15 +15 val_15 val_15 +327 val_327 val_327 +327 val_327 val_327 +327 val_327 val_327 +307 val_307 val_307 +307 val_307 val_307 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +439 val_439 val_439 +439 val_439 val_439 +255 val_255 val_255 +255 val_255 val_255 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +219 val_219 val_219 +219 val_219 val_219 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +307 val_307 val_307 +307 val_307 val_307 +431 val_431 val_431 +431 val_431 val_431 +431 val_431 val_431 +35 val_35 val_35 +35 val_35 val_35 +35 val_35 val_35 +167 val_167 val_167 +167 val_167 val_167 +167 val_167 val_167 +439 val_439 val_439 +439 val_439 val_439 +219 val_219 val_219 +219 val_219 val_219 +255 val_255 val_255 +255 val_255 val_255
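
For reviewers who want to see the new control flow in isolation, here is a small self-contained sketch of the pattern the VectorMapOperator changes introduce: row-mode values are buffered into a partial batch, and when an already-vectorized batch arrives instead (the LLAP cached-data case), the partially filled row-mode batch is flushed to the downstream operator first so rows reach the operator tree in arrival order. This is plain Java, not the real Hive classes; the names MiniBatch, Sink, LlapVrbDispatchSketch and MAX_BATCH_SIZE are illustrative stand-ins and are not part of the patch.

import java.util.ArrayList;
import java.util.List;

public class LlapVrbDispatchSketch {

  /** Stand-in for a partially filled row-mode VectorizedRowBatch. */
  static final class MiniBatch {
    final List<String> rows = new ArrayList<>();
    int size() { return rows.size(); }
    void reset() { rows.clear(); }
  }

  /** Stand-in for the downstream operator tree (oneRootOperator). */
  interface Sink {
    void process(List<String> batch);
  }

  private static final int MAX_BATCH_SIZE = 3;

  private final MiniBatch deserializerBatch = new MiniBatch();
  private final Sink oneRootOperator;

  LlapVrbDispatchSketch(Sink sink) {
    this.oneRootOperator = sink;
  }

  /** Mirrors flushDeserializerBatch(): forward whatever row-mode rows are currently buffered. */
  private void flushDeserializerBatch() {
    if (deserializerBatch.size() > 0) {
      oneRootOperator.process(new ArrayList<>(deserializerBatch.rows));
      deserializerBatch.reset();
    }
  }

  /** Mirrors the dispatch in process(Writable): a value is either a single row or a whole batch. */
  void process(Object value) {
    if (value instanceof List) {
      // Already-vectorized batch (e.g. cached by LLAP): flush buffered row-mode rows
      // first so everything reaches the sink in arrival order.
      flushDeserializerBatch();
      @SuppressWarnings("unchecked")
      List<String> vectorizedBatch = (List<String>) value;
      oneRootOperator.process(vectorizedBatch);
    } else {
      // Row-mode value: buffer it and flush only when the batch fills up.
      deserializerBatch.rows.add((String) value);
      if (deserializerBatch.size() == MAX_BATCH_SIZE) {
        flushDeserializerBatch();
      }
    }
  }

  public static void main(String[] args) {
    LlapVrbDispatchSketch op = new LlapVrbDispatchSketch(b -> System.out.println("batch: " + b));
    op.process("row1");
    op.process("row2");
    op.process(List.of("vrb1", "vrb2", "vrb3"));  // row1, row2 are flushed before this batch
    op.process("row3");
    op.process(List.of("vrb4"));                  // row3 is flushed before this batch
  }
}

On the testing side, since vector_llap_text_1.q is added to minillaplocal.query.files, it should be runnable through the MiniLlapLocal CLI driver from itests/qtest with the usual qtest invocation (something along the lines of mvn test -Dtest=TestMiniLlapLocalCliDriver -Dqfile=vector_llap_text_1.q), assuming the standard Hive itests setup.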