scala> val df = spark.createDataFrame(Seq(
| (0, Array("a", "b", "c")),
| (1, Array("a", "b", "b", "c", "a"))
| )).toDF("id", "words")
df: org.apache.spark.sql.DataFrame = [id: int, words: array<string>]

scala> import org.apache.spark.ml.feature._
import org.apache.spark.ml.feature._
scala> val cvModel: CountVectorizerModel = new CountVectorizer().setInputCol("words").setOutputCol("features").setVocabSize(3).setMinDF(2).fit(df)
cvModel: org.apache.spark.ml.feature.CountVectorizerModel = cntVec_5edcfe4828c2
scala> sc.getPersistentRDDs
res0: scala.collection.Map[Int,org.apache.spark.rdd.RDD[_]] = Map(9 -> MapPartitionsRDD[9] at map at CountVectorizer.scala:223)