Details
- Type: Sub-task
- Status: Resolved
- Priority: Major
- Resolution: Incomplete
- Affects Version/s: 2.2.1, 2.3.0
- Fix Version/s: None
Description
The following programs can throw an exception due to the JVM's 64 KB bytecode limit on a single method:
val df = spark.sparkContext.parallelize(
  Seq((1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.0,
       11.1, 12.2, 13.3, 14.4, 15.5, 16.6, 17.7, 18.8, 19.9, 20.0,
       21.1, 22.2)), 1).toDF()

df.agg(
  kurtosis('_1), kurtosis('_2), kurtosis('_3), kurtosis('_4), kurtosis('_5),
  kurtosis('_6), kurtosis('_7), kurtosis('_8), kurtosis('_9), kurtosis('_10),
  kurtosis('_11), kurtosis('_12), kurtosis('_13), kurtosis('_14), kurtosis('_15)
).collect

df.groupBy('_22)
  .agg(
    kurtosis('_1), kurtosis('_2), kurtosis('_3), kurtosis('_4), kurtosis('_5),
    kurtosis('_6), kurtosis('_7), kurtosis('_8), kurtosis('_9), kurtosis('_10),
    kurtosis('_11), kurtosis('_12), kurtosis('_13), kurtosis('_14), kurtosis('_15)
  ).collect

df.groupBy(
    round('_1, 0), round('_2, 0), round('_3, 0), round('_4, 0), round('_5, 0),
    round('_6, 0), round('_7, 0), round('_8, 0), round('_9, 0), round('_10, 0))
  .agg(
    kurtosis('_1), kurtosis('_2), kurtosis('_3), kurtosis('_4),
    kurtosis('_5), kurtosis('_6), kurtosis('_7)
  ).collect