Description

DataFrame.dropDuplicates currently accepts a bare string for the subset parameter instead of rejecting it, so the TypeError ("Parameter 'subset' must be a list of columns") that the test below expects is never raised:
df = self.spark.createDataFrame([("Alice", 50), ("Alice", 60)], ["name", "age"])

# shouldn't drop a non-null row
self.assertEqual(df.dropDuplicates().count(), 2)
self.assertEqual(df.dropDuplicates(["name"]).count(), 1)
self.assertEqual(df.dropDuplicates(["name", "age"]).count(), 2)

type_error_msg = "Parameter 'subset' must be a list of columns"
with self.assertRaisesRegex(TypeError, type_error_msg):
    df.dropDuplicates("name")
Traceback (most recent call last):
  File "/Users/s.singh/personal/spark-oss/python/pyspark/sql/tests/test_dataframe.py", line 128, in test_drop_duplicates
    with self.assertRaisesRegex(TypeError, type_error_msg):
AssertionError: TypeError not raised
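
A minimal sketch of the kind of subset validation the test expects. The helper name _validate_subset is hypothetical and the real check would have to live inside DataFrame.dropDuplicates, but it shows how a bare string such as "name" could be rejected with the exact message the test asserts on:

from typing import List, Optional


def _validate_subset(subset: Optional[List[str]]) -> None:
    # Hypothetical helper: reject anything that is not a list (e.g. the bare
    # string "name") instead of silently treating it as a sequence of columns.
    if subset is not None and not isinstance(subset, list):
        raise TypeError("Parameter 'subset' must be a list of columns")

With a check like this in place, df.dropDuplicates("name") would raise the TypeError that assertRaisesRegex expects, and the last assertion in the test above would pass.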