Spark / SPARK-25012

DataFrame creation results in MatchError


    Details

    • Type: Bug
    • Status: Resolved
    • Priority: Major
    • Resolution: Duplicate
    • Affects Version/s: 2.3.1
    • Fix Version/s: None
    • Component/s: Input/Output
    • Labels:
      None
    • Environment:
      Spark 2.3.1, macOS, Scala 2.11.12

      Description

      Hi,

      Running the code below results in:

      scala.MatchError: 2017-02-09 00:09:27.0 (of class java.sql.Timestamp)

      1. I do think this is wrong (at least I do not see the issue in my code).
      2. The error shows up in about 90% of the cases (it sometimes passes), which makes me think something weird is going on; see the note after the code below.

       

       

      package misc

      import java.sql.Timestamp
      import java.time.LocalDateTime
      import java.time.format.DateTimeFormatter

      import org.apache.spark.rdd.RDD
      import org.apache.spark.sql.sources._
      import org.apache.spark.sql.types.{StringType, StructField, StructType, TimestampType}
      import org.apache.spark.sql.{Row, SQLContext, SparkSession}

      case class LogRecord(application: String, dateTime: Timestamp, component: String, level: String, message: String)

      class LogRelation(val sqlContext: SQLContext, val path: String) extends BaseRelation with PrunedFilteredScan {
        override def schema: StructType = StructType(Seq(
          StructField("application", StringType, false),
          StructField("dateTime", TimestampType, false),
          StructField("component", StringType, false),
          StructField("level", StringType, false),
          StructField("message", StringType, false)))

        override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
          val str = "2017-02-09T00:09:27"
          val ts = Timestamp.valueOf(LocalDateTime.parse(str, DateTimeFormatter.ISO_LOCAL_DATE_TIME))

          val data = List(Row("app", ts, "comp", "level", "mess"), Row("app", ts, "comp", "level", "mess"))
          sqlContext.sparkContext.parallelize(data)
        }
      }

      class LogDataSource extends DataSourceRegister with RelationProvider {
        override def shortName(): String = "log"

        override def createRelation(sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation =
          new LogRelation(sqlContext, parameters("path"))
      }

      object f0 extends App {
        lazy val spark: SparkSession = SparkSession.builder().master("local").appName("spark session").getOrCreate()

        val df = spark.read.format("log").load("hdfs:///logs")
        df.show()
      }
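
      A note on the likely cause (an inference from the PrunedFilteredScan contract, not something confirmed in this report): Spark converts each Row returned by buildScan against the pruned schema, so the rows must contain exactly the columns named in requiredColumns, in that order. The buildScan above always emits all five fields, so whenever Spark prunes or reorders columns, the Timestamp can land in a position the pruned schema types as StringType. The requested column order is also not guaranteed to be identical across runs, which would explain why the job only fails most of the time. A minimal sketch of a buildScan that honors requiredColumns (the Map-based projection is illustrative, not part of the original code):

      override def buildScan(requiredColumns: Array[String], filters: Array[Filter]): RDD[Row] = {
        val ts = Timestamp.valueOf(
          LocalDateTime.parse("2017-02-09T00:09:27", DateTimeFormatter.ISO_LOCAL_DATE_TIME))

        // The full record, keyed by column name.
        val full: Map[String, Any] = Map(
          "application" -> "app", "dateTime" -> ts, "component" -> "comp",
          "level" -> "level", "message" -> "mess")

        // Project to exactly the requested columns, in the order Spark asked for them.
        val row = Row.fromSeq(requiredColumns.map(full))
        sqlContext.sparkContext.parallelize(List(row, row))
      }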
       
      

       

      Running the code produces the following stack trace:

       

      11:20:06 [task-result-getter-0] ERROR o.a.spark.scheduler.TaskSetManager - Task 0 in stage 0.0 failed 1 times; aborting job
      Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0, localhost, executor driver): scala.MatchError: 2017-02-09 00:09:27.0 (of class java.sql.Timestamp)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:276)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:275)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:103)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:379)
       at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$3.apply(ExistingRDD.scala:60)
       at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$3.apply(ExistingRDD.scala:57)
       at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
       at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
       at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
       at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
       at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:253)
       at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
       at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
       at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
       at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
       at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
       at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
       at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
       at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
       at org.apache.spark.scheduler.Task.run(Task.scala:109)
       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
       at java.lang.Thread.run(Thread.java:745)
      Driver stacktrace:
       at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1602)
       at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1590)
       at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1589)
       at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
       at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
       at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1589)
       at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
       at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:831)
       at scala.Option.foreach(Option.scala:257)
       at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:831)
       at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1823)
       at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1772)
       at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1761)
       at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
       at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:642)
       at org.apache.spark.SparkContext.runJob(SparkContext.scala:2034)
       at org.apache.spark.SparkContext.runJob(SparkContext.scala:2055)
       at org.apache.spark.SparkContext.runJob(SparkContext.scala:2074)
       at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:363)
       at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
       at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3273)
       at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2484)
       at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2484)
       at org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3254)
       at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
       at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3253)
       at org.apache.spark.sql.Dataset.head(Dataset.scala:2484)
       at org.apache.spark.sql.Dataset.take(Dataset.scala:2698)
       at org.apache.spark.sql.Dataset.showString(Dataset.scala:254)
       at org.apache.spark.sql.Dataset.show(Dataset.scala:723)
       at org.apache.spark.sql.Dataset.show(Dataset.scala:682)
       at org.apache.spark.sql.Dataset.show(Dataset.scala:691)
       at com.cadence.uwes.mock.bughunting.misc.f0$.delayedEndpoint$com$cadence$uwes$mock$bughunting$misc$f0$1(f1.scala:42)
       at com.cadence.uwes.mock.bughunting.misc.f0$delayedInit$body.apply(f1.scala:38)
       at scala.Function0$class.apply$mcV$sp(Function0.scala:34)
       at scala.runtime.AbstractFunction0.apply$mcV$sp(AbstractFunction0.scala:12)
       at scala.App$$anonfun$main$1.apply(App.scala:76)
       at scala.App$$anonfun$main$1.apply(App.scala:76)
       at scala.collection.immutable.List.foreach(List.scala:392)
       at scala.collection.generic.TraversableForwarder$class.foreach(TraversableForwarder.scala:35)
       at scala.App$class.main(App.scala:76)
       at com.cadence.uwes.mock.bughunting.misc.f0$.main(f1.scala:38)
       at com.cadence.uwes.mock.bughunting.misc.f0.main(f1.scala)
      Caused by: scala.MatchError: 2017-02-09 00:09:27.0 (of class java.sql.Timestamp)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:276)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$StringConverter$.toCatalystImpl(CatalystTypeConverters.scala:275)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:103)
       at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:379)
       at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$3.apply(ExistingRDD.scala:60)
       at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$3.apply(ExistingRDD.scala:57)
       at scala.collection.Iterator$$anon$11.next(Iterator.scala:410)
       at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
       at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
       at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$10$$anon$1.hasNext(WholeStageCodegenExec.scala:614)
       at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:253)
       at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:247)
       at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
       at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:830)
       at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
       at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
       at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
       at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:324)
       at org.apache.spark.rdd.RDD.iterator(RDD.scala:288)
       at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
       at org.apache.spark.scheduler.Task.run(Task.scala:109)
       at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:345)
       at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
       at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
       at java.lang.Thread.run(Thread.java:745)
      Process finished with exit code 1
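
      The MatchError itself comes from the StringConverter frames at the top of the trace: the converter Spark picks for a StringType column pattern-matches only on string values, so a java.sql.Timestamp arriving in that position hits no case. A simplified sketch of that match, paraphrased from the Spark 2.3.x sources (not the verbatim code):

      import org.apache.spark.unsafe.types.UTF8String

      // Paraphrase of CatalystTypeConverters.StringConverter.toCatalystImpl (Spark 2.3.x):
      def toCatalystImpl(scalaValue: Any): UTF8String = scalaValue match {
        case str: String      => UTF8String.fromString(str)
        case utf8: UTF8String => utf8
        // no case for java.sql.Timestamp => scala.MatchError at runtime
      }

      This matches the reported behavior: the failing value is printed as 2017-02-09 00:09:27.0 (of class java.sql.Timestamp), i.e. a timestamp handed to the string converter.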
      

       

        People

        • Assignee:
          Unassigned
        • Reporter:
          simm uwe