
Commit 14d50ac

[GLUTEN-11980][CORE][TESTS] Add test cases for decimal-key joins where the native scan on either side falls back to vanilla Spark.
1 parent c3165c1 commit 14d50ac

11 files changed: 1027 additions & 5 deletions

File tree

backends-velox/src/test/scala/org/apache/gluten/execution/FallbackSuite.scala

Lines changed: 165 additions & 0 deletions
@@ -25,6 +25,7 @@ import org.apache.spark.sql.execution.{ColumnarBroadcastExchangeExec, ColumnarSh
 import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, AQEShuffleReadExec}
 import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
 import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, BroadcastNestedLoopJoinExec, SortMergeJoinExec}
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.utils.GlutenSuiteUtils
 
 import scala.collection.mutable.ArrayBuffer
@@ -66,12 +67,53 @@ class FallbackSuite extends VeloxWholeStageTransformerSuite with AdaptiveSparkPl
       .write
       .format("parquet")
       .saveAsTable("tmp3")
+    // ORC files are written with DECIMAL(38, 18) (Hive's native storage precision).
+    // tmp4/tmp5 declare DECIMAL(20, 0) pointing to the same ORC files,
+    // so the reader must handle a precision/scale mismatch.
+    spark
+      .range(100)
+      .selectExpr(
+        "cast(id as decimal(38, 18)) as c1",
+        "cast(id % 3 as int) as c2",
+        "cast(id % 9 as timestamp) as c3")
+      .write
+      .format("orc")
+      .saveAsTable("tmp4_wide")
+    spark
+      .range(100)
+      .selectExpr(
+        "cast(id as decimal(38, 18)) as c1",
+        "cast(id % 3 as int) as c2",
+        "cast(id % 5 as timestamp) as c3")
+      .write
+      .format("orc")
+      .saveAsTable("tmp5_wide")
+    val loc4 = spark
+      .sql("DESCRIBE FORMATTED tmp4_wide")
+      .filter("col_name = 'Location'")
+      .select("data_type")
+      .collect()(0)
+      .getString(0)
+    val loc5 = spark
+      .sql("DESCRIBE FORMATTED tmp5_wide")
+      .filter("col_name = 'Location'")
+      .select("data_type")
+      .collect()(0)
+      .getString(0)
+    spark.sql(
+      s"CREATE TABLE tmp4 (c1 DECIMAL(20, 0), c2 INT, c3 TIMESTAMP) USING ORC LOCATION '$loc4'")
+    spark.sql(
+      s"CREATE TABLE tmp5 (c1 DECIMAL(20, 0), c2 INT, c3 TIMESTAMP) USING ORC LOCATION '$loc5'")
   }
 
   override protected def afterAll(): Unit = {
     spark.sql("drop table tmp1")
     spark.sql("drop table tmp2")
     spark.sql("drop table tmp3")
+    spark.sql("drop table tmp4_wide")
+    spark.sql("drop table tmp5_wide")
+    spark.sql("drop table tmp4")
+    spark.sql("drop table tmp5")
 
     super.afterAll()
   }
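
A minimal sketch (not part of this diff) of what the declared-vs-stored mismatch set up above looks like at read time, assuming beforeAll has run; the printed values are illustrative:

    spark.table("tmp4_wide").select("c1").show(1) // stored type DECIMAL(38, 18), e.g. 0.000000000000000000
    spark.table("tmp4").select("c1").show(1)      // same ORC files read as DECIMAL(20, 0), e.g. 0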
@@ -420,4 +462,127 @@ class FallbackSuite extends VeloxWholeStageTransformerSuite with AdaptiveSparkPl
       spark.sparkContext.removeSparkListener(listener)
     }
   }
+
+  test("For decimal-key joins, if one side falls back to Spark, force fallback the other side") {
+    // ORC files are written with DECIMAL(38, 18) (Hive's native storage precision).
+    // The metastore tables tmp4/tmp5 declare DECIMAL(20, 0) and point to the
+    // same ORC files, so the reader must handle a precision/scale mismatch.
+    // Selecting only c2 (INT) -> native FileSourceScanExecTransformer.
+    // Selecting c3 (TIMESTAMP) in addition -> native validation fails ->
+    // vanilla FileSourceScanExec.
+
+    // -- SortMergeJoin ------------------------------------------------------------------
+
+    val sql1 = "SELECT /*+ MERGE(tmp4) */ tmp4.c2 AS 4c2, tmp4.c3 AS 4c3, " +
+      "tmp5.c2 AS 5c2, tmp5.c3 AS 5c3 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    withSQLConf(
+      GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
+      GlutenConfig.COLUMNAR_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
+      checkAnswer(
+        spark.sql(sql1),
+        spark.sql(
+          "SELECT tmp4_wide.c2 AS 4c2, tmp4_wide.c3 AS 4c3, " +
+            "tmp5_wide.c2 AS 5c2, tmp5_wide.c3 AS 5c3 " +
+            "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+      )
+    }
+
+    val sql2 = "SELECT /*+ MERGE(tmp4) */ tmp4.c2 AS 4c2, tmp4.c3 AS 4c3, " +
+      "tmp5.c2 AS 5c2 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    withSQLConf(
+      GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
+      GlutenConfig.COLUMNAR_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
+      checkAnswer(
+        spark.sql(sql2),
+        spark.sql(
+          "SELECT tmp4_wide.c2 AS 4c2, tmp4_wide.c3 AS 4c3, " +
+            "tmp5_wide.c2 AS 5c2 " +
+            "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+      )
+    }
+
+    val sql3 = "SELECT /*+ MERGE(tmp4) */ tmp4.c2 AS 4c2, " +
+      "tmp5.c2 AS 5c2, tmp5.c3 AS 5c3 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    withSQLConf(
+      GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
+      GlutenConfig.COLUMNAR_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
+      checkAnswer(
+        spark.sql(sql3),
+        spark.sql(
+          "SELECT tmp4_wide.c2 AS 4c2, " +
+            "tmp5_wide.c2 AS 5c2, tmp5_wide.c3 AS 5c3 " +
+            "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+      )
+    }
+
+    // -- ShuffledHashJoin ---------------------------------------------------------------
+
+    val sql4 = "SELECT /*+ SHUFFLE_HASH(tmp4) */ tmp4.c2 AS 4c2, tmp4.c3 AS 4c3, " +
+      "tmp5.c2 AS 5c2, tmp5.c3 AS 5c3 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+      checkAnswer(
+        spark.sql(sql4),
+        spark.sql(
+          "SELECT tmp4_wide.c2 AS 4c2, tmp4_wide.c3 AS 4c3, " +
+            "tmp5_wide.c2 AS 5c2, tmp5_wide.c3 AS 5c3 " +
+            "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+      )
+    }
+
+    val sql5 = "SELECT /*+ SHUFFLE_HASH(tmp4) */ tmp4.c2 AS 4c2, tmp4.c3 AS 4c3, " +
+      "tmp5.c2 AS 5c2 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+      checkAnswer(
+        spark.sql(sql5),
+        spark.sql(
+          "SELECT tmp4_wide.c2 AS 4c2, tmp4_wide.c3 AS 4c3, " +
+            "tmp5_wide.c2 AS 5c2 " +
+            "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+      )
+    }
+
+    val sql6 = "SELECT /*+ SHUFFLE_HASH(tmp4) */ tmp4.c2 AS 4c2, " +
+      "tmp5.c2 AS 5c2, tmp5.c3 AS 5c3 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+      checkAnswer(
+        spark.sql(sql6),
+        spark.sql(
+          "SELECT tmp4_wide.c2 AS 4c2, " +
+            "tmp5_wide.c2 AS 5c2, tmp5_wide.c3 AS 5c3 " +
+            "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+      )
+    }
+
+    // -- BroadcastHashJoin --------------------------------------------------------------
+
+    val sql7 = "SELECT tmp4.c2 AS 4c2, tmp4.c3 AS 4c3, " +
+      "tmp5.c2 AS 5c2, tmp5.c3 AS 5c3 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    checkAnswer(
+      spark.sql(sql7),
+      spark.sql(
+        "SELECT tmp4_wide.c2 AS 4c2, tmp4_wide.c3 AS 4c3, " +
+          "tmp5_wide.c2 AS 5c2, tmp5_wide.c3 AS 5c3 " +
+          "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
    )
+
+    val sql8 = "SELECT tmp4.c2 AS 4c2, tmp4.c3 AS 4c3, " +
+      "tmp5.c2 AS 5c2 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    checkAnswer(
+      spark.sql(sql8),
+      spark.sql(
+        "SELECT tmp4_wide.c2 AS 4c2, tmp4_wide.c3 AS 4c3, " +
+          "tmp5_wide.c2 AS 5c2 " +
+          "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+    )
+
+    val sql9 = "SELECT tmp4.c2 AS 4c2, " +
+      "tmp5.c2 AS 5c2, tmp5.c3 AS 5c3 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1"
+    checkAnswer(
+      spark.sql(sql9),
+      spark.sql(
+        "SELECT tmp4_wide.c2 AS 4c2, " +
+          "tmp5_wide.c2 AS 5c2, tmp5_wide.c3 AS 5c3 " +
+          "FROM tmp4_wide JOIN tmp5_wide ON tmp4_wide.c1 = tmp5_wide.c1")
+    )
+  }
 }
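
The assertions above compare query results only. A hedged sketch of how the plan-level effect could also be checked, assuming the forced fallback leaves a vanilla scan on both sides; collectWithSubqueries comes from AdaptiveSparkPlanHelper, which this suite already mixes in, and org.apache.spark.sql.execution.FileSourceScanExec would need importing:

    val df = spark.sql(
      "SELECT tmp4.c3, tmp5.c2 FROM tmp4 JOIN tmp5 ON tmp4.c1 = tmp5.c1")
    df.collect()
    // tmp4's scan falls back on its own (it selects the TIMESTAMP column); if the
    // decimal-key rule works as described, tmp5's otherwise-native scan follows it:
    val vanillaScans = collectWithSubqueries(df.queryExecution.executedPlan) {
      case scan: FileSourceScanExec => scan
    }
    assert(vanillaScans.length == 2)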

gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQuerySuite.scala

Lines changed: 161 additions & 0 deletions
@@ -25,6 +25,7 @@ import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.hive.{HiveExternalCatalog, HiveTableScanExecTransformer}
 import org.apache.spark.sql.hive.client.HiveClient
+import org.apache.spark.sql.internal.SQLConf
 
 class GlutenHiveSQLQuerySuite extends GlutenHiveSQLQuerySuiteBase {
 
@@ -167,4 +168,164 @@
       }
     }
   }
+
+  testGluten(
+    "GLUTEN-11980: For decimal-key joins, " +
+      "if one side falls back to Spark, force fallback the other side") {
+    withSQLConf("spark.sql.hive.convertMetastoreOrc" -> "false") {
+      withTable("htmp1_wide", "htmp2_wide", "htmp1", "htmp2") {
+        // ORC files are written with DECIMAL(38, 18) (Hive's native storage precision).
+        // The metastore tables htmp1/htmp2 declare DECIMAL(20, 0) and point to the
+        // same ORC files, so the reader must handle a precision/scale mismatch.
+        // Selecting only c2 (INT) -> native HiveTableScanExecTransformer.
+        // Selecting c3 (TIMESTAMP) in addition -> native validation fails ->
+        // vanilla HiveTableScanExec.
+        sql("CREATE TABLE htmp1_wide (c1 DECIMAL(38, 18), c2 INT, c3 TIMESTAMP) STORED AS ORC")
+        sql("CREATE TABLE htmp2_wide (c1 DECIMAL(38, 18), c2 INT, c3 TIMESTAMP) STORED AS ORC")
+        sql("INSERT INTO htmp1_wide " +
+          "SELECT cast(id AS DECIMAL(38, 18)), id % 3, cast(id % 9 AS TIMESTAMP) " +
+          "FROM range(1, 101)")
+        sql("INSERT INTO htmp2_wide " +
+          "SELECT cast(id AS DECIMAL(38, 18)), id % 3, cast(id % 5 AS TIMESTAMP) " +
+          "FROM range(1, 101)")
+        val loc1 = sql("DESCRIBE FORMATTED htmp1_wide")
+          .filter("col_name = 'Location'")
+          .select("data_type")
+          .collect()(0)
+          .getString(0)
+        val loc2 = sql("DESCRIBE FORMATTED htmp2_wide")
+          .filter("col_name = 'Location'")
+          .select("data_type")
+          .collect()(0)
+          .getString(0)
+        sql("CREATE TABLE htmp1 (c1 DECIMAL(20, 0), c2 INT, c3 TIMESTAMP) " +
+          s"STORED AS ORC LOCATION '$loc1'")
+        sql("CREATE TABLE htmp2 (c1 DECIMAL(20, 0), c2 INT, c3 TIMESTAMP) " +
+          s"STORED AS ORC LOCATION '$loc2'")
+
+        // -- SortMergeJoin ------------------------------------------------------------------
+
+        val sql1 =
+          "SELECT /*+ MERGE(htmp1) */ htmp1.c2 AS 1c2, htmp1.c3 AS 1c3, " +
+            "htmp2.c2 AS 2c2, htmp2.c3 AS 2c3 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        withSQLConf(
+          GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
+          GlutenConfig.COLUMNAR_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
+          checkAnswer(
+            spark.sql(sql1),
+            spark.sql(
+              "SELECT htmp1_wide.c2 AS 1c2, htmp1_wide.c3 AS 1c3, " +
+                "htmp2_wide.c2 AS 2c2, htmp2_wide.c3 AS 2c3 " +
+                "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+          )
+        }
+
+        val sql2 =
+          "SELECT /*+ MERGE(htmp1) */ htmp1.c2 AS 1c2, htmp1.c3 AS 1c3, " +
+            "htmp2.c2 AS 2c2 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        withSQLConf(
+          GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
+          GlutenConfig.COLUMNAR_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
+          checkAnswer(
+            spark.sql(sql2),
+            spark.sql(
+              "SELECT htmp1_wide.c2 AS 1c2, htmp1_wide.c3 AS 1c3, " +
+                "htmp2_wide.c2 AS 2c2 " +
+                "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+          )
+        }
+
+        val sql3 =
+          "SELECT /*+ MERGE(htmp1) */ htmp1.c2 AS 1c2, " +
+            "htmp2.c2 AS 2c2, htmp2.c3 AS 2c3 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        withSQLConf(
+          GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
+          GlutenConfig.COLUMNAR_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
+          checkAnswer(
+            spark.sql(sql3),
+            spark.sql(
+              "SELECT htmp1_wide.c2 AS 1c2, " +
+                "htmp2_wide.c2 AS 2c2, htmp2_wide.c3 AS 2c3 " +
+                "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+          )
+        }
+
+        // -- ShuffledHashJoin ---------------------------------------------------------------
+
+        val sql4 =
+          "SELECT /*+ SHUFFLE_HASH(htmp1) */ htmp1.c2 AS 1c2, htmp1.c3 AS 1c3, " +
+            "htmp2.c2 AS 2c2, htmp2.c3 AS 2c3 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+          checkAnswer(
+            spark.sql(sql4),
+            spark.sql(
+              "SELECT htmp1_wide.c2 AS 1c2, htmp1_wide.c3 AS 1c3, " +
+                "htmp2_wide.c2 AS 2c2, htmp2_wide.c3 AS 2c3 " +
+                "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+          )
+        }
+
+        val sql5 =
+          "SELECT /*+ SHUFFLE_HASH(htmp1) */ htmp1.c2 AS 1c2, htmp1.c3 AS 1c3, " +
+            "htmp2.c2 AS 2c2 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+          checkAnswer(
+            spark.sql(sql5),
+            spark.sql(
+              "SELECT htmp1_wide.c2 AS 1c2, htmp1_wide.c3 AS 1c3, " +
+                "htmp2_wide.c2 AS 2c2 " +
+                "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+          )
+        }
+
+        val sql6 =
+          "SELECT /*+ SHUFFLE_HASH(htmp1) */ htmp1.c2 AS 1c2, " +
+            "htmp2.c2 AS 2c2, htmp2.c3 AS 2c3 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
+          checkAnswer(
+            spark.sql(sql6),
+            spark.sql(
+              "SELECT htmp1_wide.c2 AS 1c2, " +
+                "htmp2_wide.c2 AS 2c2, htmp2_wide.c3 AS 2c3 " +
+                "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+          )
+        }
+
+        // -- BroadcastHashJoin --------------------------------------------------------------
+
+        val sql7 =
+          "SELECT htmp1.c2 AS 1c2, htmp1.c3 AS 1c3, " +
+            "htmp2.c2 AS 2c2, htmp2.c3 AS 2c3 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        checkAnswer(
+          spark.sql(sql7),
+          spark.sql(
+            "SELECT htmp1_wide.c2 AS 1c2, htmp1_wide.c3 AS 1c3, " +
+              "htmp2_wide.c2 AS 2c2, htmp2_wide.c3 AS 2c3 " +
+              "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+        )
+
+        val sql8 =
+          "SELECT htmp1.c2 AS 1c2, htmp1.c3 AS 1c3, " +
+            "htmp2.c2 AS 2c2 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        checkAnswer(
+          spark.sql(sql8),
+          spark.sql(
+            "SELECT htmp1_wide.c2 AS 1c2, htmp1_wide.c3 AS 1c3, " +
+              "htmp2_wide.c2 AS 2c2 " +
+              "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+        )
+
+        val sql9 =
+          "SELECT htmp1.c2 AS 1c2, " +
+            "htmp2.c2 AS 2c2, htmp2.c3 AS 2c3 FROM htmp1 JOIN htmp2 ON htmp1.c1 = htmp2.c1"
+        checkAnswer(
+          spark.sql(sql9),
+          spark.sql(
+            "SELECT htmp1_wide.c2 AS 1c2, " +
+              "htmp2_wide.c2 AS 2c2, htmp2_wide.c3 AS 2c3 " +
+              "FROM htmp1_wide JOIN htmp2_wide ON htmp1_wide.c1 = htmp2_wide.c1")
+        )
+      }
+    }
+  }
 }
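
A note on the harness (a hedged reading, not stated in the commit): with spark.sql.hive.convertMetastoreOrc=false, Spark stays on the Hive SerDe path and plans HiveTableScanExec instead of converting to a data-source scan, so the native operator under test is HiveTableScanExecTransformer. One plausible way to confirm which operator a given projection gets, assuming no AQE wrapper for a scan-only query:

    val plan = spark.sql("SELECT c2 FROM htmp1").queryExecution.executedPlan
    // per the comment in the test, a c2-only projection should pass native validation
    assert(plan.collect { case h: HiveTableScanExecTransformer => h }.nonEmpty)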

gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQuerySuiteBase.scala

Lines changed: 11 additions & 1 deletion
@@ -18,7 +18,7 @@ package org.apache.spark.sql.hive.execution
 
 import org.apache.gluten.execution.TransformSupport
 
-import org.apache.spark.SparkConf
+import org.apache.spark.{DebugFilesystem, SparkConf}
 import org.apache.spark.internal.config
 import org.apache.spark.internal.config.UI.UI_ENABLED
 import org.apache.spark.sql.{DataFrame, GlutenSQLTestsTrait, SparkSession}
@@ -61,6 +61,16 @@ abstract class GlutenHiveSQLQuerySuiteBase extends GlutenSQLTestsTrait {
     }
   }
 
+  override def afterEach(): Unit = {
+    // Clear any file handles left open by Hive ORC's SplitGenerator background threads.
+    // OrcInputFormat$SplitGenerator.populateAndCacheStripeDetails() opens ORC readers
+    // via OrcFile.createReader() in background FutureTasks that are never explicitly closed
+    // (Hive bug HIVE-17183), leaking handles into DebugFilesystem.openStreams and causing
+    // SharedSparkSessionBase.afterEach() to abort the suite via assertNoOpenStreams().
+    DebugFilesystem.clearOpenStreams()
+    super.afterEach()
+  }
+
   protected def defaultSparkConf: SparkConf = {
     val conf = new SparkConf()
       .set("spark.master", "local[1]")
