IMPALA-6102

FilterContext::CheckForAlwaysFalse references FilterContext::stats, which can be null, resulting in a crash


Details

    • Type: Bug
    • Status: Resolved
    • Priority: Blocker
    • Resolution: Duplicate
    • Affects Version/s: None
    • Fix Version/s: None
    • Component/s: Backend
    • Labels: ghx-label-7

    Description

      This seems related to the fix for IMPALA-5789.

      If the backend doesn't crash, scanner threads end up spinning in:

      #0  0x00000036ed60e3f5 in __lll_unlock_wake () from /lib64/libpthread.so.0
      #1  0x00000036ed60a877 in _L_unlock_657 () from /lib64/libpthread.so.0
      #2  0x00000036ed60a7df in pthread_mutex_unlock () from /lib64/libpthread.so.0
      #3  0x000000000090a868 in boost::mutex::unlock() ()
      #4  0x0000000000d2dd8f in impala::HdfsScanNode::ScannerThread() ()
      #5  0x0000000000cc3ca2 in impala::Thread::SuperviseThread(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, boost::function<void ()()>, impala::Promise<long>*) ()
      #6  0x0000000000cc4404 in boost::detail::thread_data<boost::_bi::bind_t<void, void (*)(std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, std::basic_string<char, std::char_traits<char>, std::allocator<char> > const&, boost::function<void ()()>, impala::Promise<long>*), boost::_bi::list4<boost::_bi::value<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >, boost::_bi::value<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >, boost::_bi::value<boost::function<void ()()> >, boost::_bi::value<impala::Promise<long>*> > > >::run() ()
      #7  0x000000000104e18a in thread_proxy ()
      #8  0x00000036ed6079d1 in start_thread () from /lib64/libpthread.so.0
      #9  0x00000036ed2e88fd in clone () from /lib64/libc.so.6
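
      The crash below is a dereference of FilterContext::stats while that pointer is null. As a rough sketch of why it can be null (illustrative C++ stand-ins, not the exact declarations in be/src/exec/filter-context.h):

      // Illustrative stand-ins only, not Impala's actual declarations.
      // The point: 'stats' is a plain pointer populated separately from
      // 'filter', so a context can reach the always-false check with
      // 'filter' set but 'stats' still null.
      class RuntimeFilter;  // opaque here
      class FilterStats;    // opaque here

      struct FilterContext {
        const RuntimeFilter* filter = nullptr;  // the arrived runtime filter
        FilterStats* stats = nullptr;           // counters; may still be null
      };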
      

      Query

      select sum(SR_RETURN_AMT) as ctr_total_return
      from store_returns
      ,date_dim
      where sr_returned_date_sk = d_date_sk
      and d_year = 2002
      group by sr_customer_sk
      ,sr_store_sk
      

      Plan

      F03:PLAN FRAGMENT [UNPARTITIONED] hosts=1 instances=1
      |  Per-Host Resources: mem-estimate=0B mem-reservation=0B
      PLAN-ROOT SINK
      |  mem-estimate=0B mem-reservation=0B
      |
      07:EXCHANGE [UNPARTITIONED]
      |  mem-estimate=0B mem-reservation=0B
      |  tuple-ids=2 row-size=24B cardinality=53604747
      |
      F02:PLAN FRAGMENT [HASH(sr_customer_sk,sr_store_sk)] hosts=14 instances=14
      Per-Host Resources: mem-estimate=785.24MB mem-reservation=34.00MB
      06:AGGREGATE [FINALIZE]
      |  output: sum:merge(SR_RETURN_AMT)
      |  group by: sr_customer_sk, sr_store_sk
      |  mem-estimate=785.24MB mem-reservation=34.00MB spill-buffer=2.00MB
      |  tuple-ids=2 row-size=24B cardinality=53604747
      |
      05:EXCHANGE [HASH(sr_customer_sk,sr_store_sk)]
      |  mem-estimate=0B mem-reservation=0B
      |  tuple-ids=2 row-size=24B cardinality=53604747
      |
      F00:PLAN FRAGMENT [RANDOM] hosts=14 instances=14
      Per-Host Resources: mem-estimate=1.39GB mem-reservation=35.94MB
      03:AGGREGATE [STREAMING]
      |  output: sum(SR_RETURN_AMT)
      |  group by: sr_customer_sk, sr_store_sk
      |  mem-estimate=1.32GB mem-reservation=34.00MB spill-buffer=2.00MB
      |  tuple-ids=2 row-size=24B cardinality=53604747
      |
      02:HASH JOIN [INNER JOIN, BROADCAST]
      |  hash predicates: sr_returned_date_sk = d_date_sk
      |  fk/pk conjuncts: sr_returned_date_sk = d_date_sk
      |  runtime filters: RF000 <- d_date_sk
      |  mem-estimate=1.94MB mem-reservation=1.94MB spill-buffer=64.00KB
      |  tuple-ids=0,1 row-size=24B cardinality=53604747
      |
      |--04:EXCHANGE [BROADCAST]
      |  |  mem-estimate=0B mem-reservation=0B
      |  |  tuple-ids=1 row-size=8B cardinality=373
      |  |
      |  F01:PLAN FRAGMENT [RANDOM] hosts=1 instances=1
      |  Per-Host Resources: mem-estimate=32.00MB mem-reservation=0B
      |  01:SCAN HDFS [tpcds_1000_parquet.date_dim, RANDOM]
      |     partitions=1/1 files=1 size=2.17MB
      |     predicates: d_year = 2002
      |     stats-rows=73049 extrapolated-rows=73049
      |     table stats: rows=73049 size=2.17MB
      |     column stats: all
      |     parquet statistics predicates: d_year = 2002
      |     parquet dictionary predicates: d_year = 2002
      |     mem-estimate=32.00MB mem-reservation=0B
      |     tuple-ids=1 row-size=8B cardinality=373
      |
      00:SCAN HDFS [tpcds_1000_parquet.store_returns, RANDOM]
         partitions=2004/2004 files=2004 size=18.74GB
         runtime filters: RF000 -> sr_returned_date_sk
         stats-rows=287999764 extrapolated-rows=unavailable
         table stats: rows=287999764 size=unavailable
         column stats: all
         mem-estimate=72.00MB mem-reservation=0B
         tuple-ids=0 row-size=16B cardinality=287999764
      

      Crash stack

      #0  0x0000003188632625 in raise () from /lib64/libc.so.6
      #1  0x0000003188633e05 in abort () from /lib64/libc.so.6
      #2  0x00007f7e175bca55 in os::abort(bool) () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #3  0x00007f7e1773cf87 in VMError::report_and_die() () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #4  0x00007f7e1773d50e in crash_handler(int, siginfo*, void*) () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #5  0x00007f7e175bbbf2 in os::Linux::chained_handler(int, siginfo*, void*) ()
         from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #6  0x00007f7e175c18d6 in JVM_handle_linux_signal () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #7  <signal handler called>
      #8  0x00007f7e175dcce9 in ParallelScavengeHeap::block_start(void const*) const ()
         from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #9  0x00007f7e175b42b0 in os::print_location(outputStream*, long, bool) ()
      #10 0x00007f7e175c1212 in os::print_register_info(outputStream*, void*) ()
      #11 0x00007f7e1773b57c in VMError::report(outputStream*) () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #12 0x00007f7e1773cb8a in VMError::report_and_die() () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #13 0x00007f7e175c196f in JVM_handle_linux_signal () from /usr/java/jdk1.7.0_67-cloudera/jre/lib/amd64/server/libjvm.so
      #14 <signal handler called>
      #15 impala::FilterStats::IncrCounters (this=Unhandled dwarf expression opcode 0xf3)    at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/filter-context.cc:50
      #16 0x0000000000df5f3a in impala::FilterContext::CheckForAlwaysFalse (stats_name=Unhandled dwarf expression opcode 0xf3)    at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/filter-context.cc:382
      #17 0x0000000000d350f2 in impala::HdfsScanNodeBase::PartitionPassesFilters (this=0xee69800, partition_id=1575, stats_name=...,    filter_ctxs=...) at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/hdfs-scan-node-base.cc:649
      #18 0x0000000000d66a1e in impala::HdfsParquetScanner::GetNextInternal (this=0x131fdc00, row_batch=0x102f4e70)    at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/hdfs-parquet-scanner.cc:502
      #19 0x0000000000d60f27 in impala::HdfsParquetScanner::ProcessSplit (this=0x131fdc00)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/hdfs-parquet-scanner.cc:405
      #20 0x0000000000d2c756 in impala::HdfsScanNode::ProcessSplit (this=0x723bdfc603f99250, filter_ctxs=..., expr_results_pool=0x1,
          scan_range=0x1) at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/hdfs-scan-node.cc:533
      #21 0x0000000000d2e4a4 in impala::HdfsScanNode::ScannerThread (this=0xee69800)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/exec/hdfs-scan-node.cc:442
      
      #22 0x0000000000cc3ca2 in operator() (name=Unhandled dwarf expression opcode 0xf3)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/toolchain/boost-1.57.0-p3/include/boost/function/function_template.hpp:767
      #23 impala::Thread::SuperviseThread (name=Unhandled dwarf expression opcode 0xf3)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/be/src/util/thread.cc:352
      #24 0x0000000000cc4404 in operator()<void (*)(const std::basic_string<char>&, const std::basic_string<char>&, boost::function<void()>, impala::Promise<long int>*), boost::_bi::list0> (this=0x12b5e400)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/toolchain/boost-1.57.0-p3/include/boost/bind/bind.hpp:457
      #25 operator() (this=0x12b5e400)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/toolchain/boost-1.57.0-p3/include/boost/bind/bind_template.hpp:20
      #26 boost::detail::thread_data<boost::_bi::bind_t<void, void (*)(const std::basic_string<char, std::char_traits<char>, std::allocator<char> >&, const std::basic_string<char, std::char_traits<char>, std::allocator<char> >&, boost::function<void()>, impala::Promise<long int>*), boost::_bi::list4<boost::_bi::value<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >, boost::_bi::value<std::basic_string<char, std::char_traits<char>, std::allocator<char> > >, boost::_bi::value<boost::function<void()> >, boost::_bi::value<impala::Promise<long int>*> > > >::run(void) (this=0x12b5e400)
          at /data/jenkins/workspace/impala-private-build-binaries/repos/Impala/toolchain/boost-1.57.0-p3/include/boost/thread/detail/thread.hpp:116
      #27 0x000000000104e18a in thread_proxy ()
      #28 0x0000003188a079d1 in start_thread () from /lib64/libpthread.so.0
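
      Frames #15 and #16 show FilterStats::IncrCounters being reached through FilterContext::stats while that pointer is null. Below is a minimal sketch of the kind of null guard that would avoid the dereference; the loop shape is assumed from the frames above and the types are reduced stand-ins, so this is not the actual patch (the issue was resolved as a duplicate).

      #include <string>
      #include <vector>

      // Reduced stand-ins for the members this code path touches.
      class RuntimeFilter {
       public:
        bool AlwaysFalse() const { return always_false_; }
        bool always_false_ = false;
      };

      class FilterStats {
       public:
        void IncrCounters(const std::string& name, int total, int processed,
                          int rejected) { /* bump counters keyed by 'name' */ }
      };

      struct FilterContext {
        const RuntimeFilter* filter = nullptr;
        FilterStats* stats = nullptr;  // a null here is what crashes frame #15
      };

      // Assumed shape of CheckForAlwaysFalse(), reconstructed from the stack;
      // the sketched fix is simply guarding 'stats' before using it.
      bool CheckForAlwaysFalse(const std::string& stats_name,
                               const std::vector<FilterContext>& ctxs) {
        for (const FilterContext& ctx : ctxs) {
          if (ctx.filter != nullptr && ctx.filter->AlwaysFalse()) {
            if (ctx.stats != nullptr) {
              ctx.stats->IncrCounters(stats_name, 1, 1, 1);
            }
            return true;
          }
        }
        return false;
      }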
      

      Attachments

        1. IMPALA-6102.txt (224 kB, attached by Mostafa Mokhtar)


          People

            Assignee: Tianyi Wang (tianyiwang)
            Reporter: Mostafa Mokhtar (mmokhtar)
            Votes: 0
            Watchers: 2
