Details

    • Type: Sub-task
    • Status: Resolved
    • Priority: Major
    • Resolution: Fixed
    • Affects Version/s: 3.4.0
    • Fix Version/s: 3.4.0
    • Component/s: Connect
    • Labels: None

    Description

      df = self.df
      from pyspark.sql import functions
      
      rnd = df.select("key", functions.rand()).collect()
      for row in rnd:
          assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
      rndn = df.select("key", functions.randn(5)).collect()
      for row in rndn:
          assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
      
      # If the specified seed is 0, we should use it.
      # https://issues.apache.org/jira/browse/SPARK-9691
      rnd1 = df.select("key", functions.rand(0)).collect()
      rnd2 = df.select("key", functions.rand(0)).collect()
      self.assertEqual(sorted(rnd1), sorted(rnd2))
      
      rndn1 = df.select("key", functions.randn(0)).collect()
      rndn2 = df.select("key", functions.randn(0)).collect()
      self.assertEqual(sorted(rndn1), sorted(rndn2))
      Traceback (most recent call last):
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_functions.py", line 299, in test_rand_functions
          rnd = df.select("key", functions.rand()).collect()
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py", line 2917, in select
          jdf = self._jdf.select(self._jcols(*cols))
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py", line 2537, in _jcols
          return self._jseq(cols, _to_java_column)
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/dataframe.py", line 2524, in _jseq
          return _to_seq(self.sparkSession._sc, cols, converter)
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line 86, in _to_seq
          cols = [converter(c) for c in cols]
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line 86, in <listcomp>
          cols = [converter(c) for c in cols]
        File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/column.py", line 65, in _to_java_column
          raise TypeError(
      TypeError: Invalid argument, not a string or column: Column<'rand()'> of type <class 'pyspark.sql.connect.column.Column'>. For column literals, use 'lit', 'array', 'struct' or 'create_map' function.
      

      Attachments

        Activity

          People

            Assignee: gurwls223 Hyukjin Kwon
            Reporter: techaddict Sandeep Singh
            Votes:
            0 Vote for this issue
            Watchers:
            3 Start watching this issue

            Dates

              Created:
              Updated:
              Resolved: