@@ -547,7 +547,6 @@ class VeloxTestSettings extends BackendTestSettings {
// TODO: fix on Spark-4.1
.excludeByPrefix("SPARK-53535") // see https://issues.apache.org/jira/browse/SPARK-53535
.excludeByPrefix("vectorized reader: missing all struct fields")
.excludeByPrefix("SPARK-54220") // https://issues.apache.org/jira/browse/SPARK-54220
enableSuite[GlutenParquetV1PartitionDiscoverySuite]
enableSuite[GlutenParquetV2PartitionDiscoverySuite]
enableSuite[GlutenParquetProtobufCompatibilitySuite]
@@ -17,14 +17,51 @@
package org.apache.spark.sql.execution.datasources.parquet

import org.apache.spark.sql._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegerType, StringType, StructType}

/** A test suite that tests basic Parquet I/O. */
class GlutenParquetIOSuite extends ParquetIOSuite with GlutenSQLTestsBaseTrait {
override def testNameBlackList: Seq[String] =
Seq("SPARK-54220: vectorized reader: missing all struct fields, struct with NullType only")

override protected def testFile(fileName: String): String = {
getWorkspaceFilePath("sql", "core", "src", "test", "resources").toString + "/" + fileName
}

override protected def readResourceParquetFile(name: String): DataFrame = {
spark.read.parquet(testFile(name))
}

testGluten(
"SPARK-54220: vectorized reader: missing all struct fields, struct with NullType only") {
val data = Seq(
Tuple1((null, null)),
Tuple1((null, null)),
Tuple1(null)
)
val readSchema = new StructType().add(
"_1",
new StructType()
.add("_3", IntegerType, nullable = true)
.add("_4", StringType, nullable = true),
nullable = true)
val expectedAnswer = Row(Row(null, null)) :: Row(Row(null, null)) :: Row(null) :: Nil

withParquetFile(data) {
file =>
for (offheapEnabled <- Seq(true, false)) {
withSQLConf(
SQLConf.PARQUET_VECTORIZED_READER_NESTED_COLUMN_ENABLED.key -> "true",
SQLConf.LEGACY_PARQUET_RETURN_NULL_STRUCT_IF_ALL_FIELDS_MISSING.key -> "false",
SQLConf.COLUMN_VECTOR_OFFHEAP_ENABLED.key -> offheapEnabled.toString
) {
withAllParquetReaders {
val df = spark.read.schema(readSchema).parquet(file)
checkAnswer(df, expectedAnswer)
}
}
}
}
}
}