📅  Last modified: 2022-03-11 14:58:38.382000             🧑  Author: Mango
# Build a sample DataFrame mixing string and numeric columns, then keep
# only the columns whose schema type is NOT StringType.
df = spark.createDataFrame([
    (1, "a", "xxx", None, "abc", "xyz", "fgh"),
    (2, "b", None, 3, "abc", "xyz", "fgh"),
    (3, "c", "a23", None, None, "xyz", "fgh"),
], ("ID", "flag", "col1", "col2", "col3", "col4", "col5"))

# Import only the name actually used instead of a wildcard import.
from pyspark.sql.types import StringType

# Names of the non-string columns, read from the inferred schema
# (here: ID and col2, both inferred as LongType).
num_cols = [f.name for f in df.schema.fields
            if not isinstance(f.dataType, StringType)]

# select() accepts a list of column names directly — the original
# `[c for c in num_cols]` was a redundant copy of the list.
df2 = df.select(num_cols)
df2.show()
+---+----+
| ID|col2|
+---+----+
| 1|null|
| 2| 3|
| 3|null|
+---+----+