
Commit 784e5a1

ArtifactManagerSuite test fixes
1 parent: c1a37a3

4 files changed: 9 additions, 11 deletions

project/MimaExcludes.scala

Lines changed: 4 additions & 1 deletion
@@ -45,7 +45,10 @@ object MimaExcludes {
     ProblemFilters.exclude[MissingClassProblem]("org.apache.spark.util.collection.PrimitiveKeyOpenHashMap*"),
 
     // [SPARK-54041][SQL] Enable Direct Passthrough Partitioning in the DataFrame API
-    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.Dataset.repartitionById")
+    ProblemFilters.exclude[ReversedMissingMethodProblem]("org.apache.spark.sql.Dataset.repartitionById"),
+
+    // [SPARK-54001][CONNECT] Replace block copying with ref-counting in ArtifactManager cloning
+    ProblemFilters.exclude[DirectMissingMethodProblem]("org.apache.spark.sql.artifact.ArtifactManager.cachedBlockIdList")
   )
 
   // Default exclude rules

sql/connect/client/jvm/src/test/scala/org/apache/spark/sql/connect/client/CheckConnectJvmClientCompatibility.scala

Lines changed: 2 additions & 0 deletions
@@ -234,6 +234,8 @@ object CheckConnectJvmClientCompatibility {
       "org.apache.spark.sql.artifact.ArtifactManager$"),
     ProblemFilters.exclude[MissingClassProblem](
       "org.apache.spark.sql.artifact.ArtifactManager$SparkContextResourceType$"),
+    ProblemFilters.exclude[MissingClassProblem](
+      "org.apache.spark.sql.artifact.RefCountedCacheId"),
 
     // ColumnNode conversions
     ProblemFilters.exclude[MissingTypesProblem]("org.apache.spark.sql.SparkSession"),

sql/connect/server/src/main/scala/org/apache/spark/sql/connect/planner/SparkConnectPlanner.scala

Lines changed: 1 addition & 1 deletion
@@ -60,7 +60,7 @@ import org.apache.spark.sql.catalyst.util.{CaseInsensitiveMap, CharVarcharUtils}
 import org.apache.spark.sql.classic.{Catalog, DataFrameWriter, Dataset, MergeIntoWriter, RelationalGroupedDataset, SparkSession, TypedAggUtils, UserDefinedFunctionUtils}
 import org.apache.spark.sql.classic.ClassicConversions._
 import org.apache.spark.sql.connect.client.arrow.ArrowSerializer
-import org.apache.spark.sql.connect.common.{DataTypeProtoConverter, ForeachWriterPacket, LiteralValueProtoConverter, StorageLevelProtoConverter, StreamingListenerPacket, UdfPacket}
+import org.apache.spark.sql.connect.common.{DataTypeProtoConverter, ForeachWriterPacket, InvalidPlanInput, LiteralValueProtoConverter, StorageLevelProtoConverter, StreamingListenerPacket, UdfPacket}
 import org.apache.spark.sql.connect.config.Connect.CONNECT_GRPC_ARROW_MAX_BATCH_SIZE
 import org.apache.spark.sql.connect.ml.MLHandler
 import org.apache.spark.sql.connect.pipelines.PipelinesHandler

sql/core/src/test/scala/org/apache/spark/sql/artifact/ArtifactManagerSuite.scala

Lines changed: 2 additions & 9 deletions
@@ -503,15 +503,8 @@ class ArtifactManagerSuite extends SharedSparkSession {
     assert(newArtifactManager.artifactPath !== artifactManager.artifactPath)
 
     // Load the cached artifact
-    val blockManager = newSession.sparkContext.env.blockManager
-    for (sessionId <- Seq(spark.sessionUUID, newSession.sessionUUID)) {
-      val cacheId = CacheId(sessionId, "test")
-      try {
-        assert(blockManager.getLocalBytes(cacheId).get.toByteBuffer().array() === testBytes)
-      } finally {
-        blockManager.releaseLock(cacheId)
-      }
-    }
+    assert(spark.artifactManager.getCachedBlockId("test")
+      == newArtifactManager.getCachedBlockId("test"))
 
     val allFiles = Utils.listFiles(newArtifactManager.artifactPath.toFile)
     assert(allFiles.size() === 3)
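
For context on why the test no longer copies bytes out of the block manager: the related change ([SPARK-54001]) replaces block copying with ref-counting when an ArtifactManager is cloned, so a cloned session is expected to report the same cached block ID rather than hold its own copy. The sketch below is not Spark's implementation; it only illustrates the general ref-counting pattern under hypothetical names (`RefCounted`, `retain`, `release`): cloning shares the same underlying value and bumps a counter, and the resource is freed only when the last holder releases it.

import java.util.concurrent.atomic.AtomicInteger

// Hypothetical sketch of a ref-counted cache entry: cloning bumps the count
// instead of copying the underlying block; the block is freed only when the
// last holder releases it. Names here are illustrative, not Spark's API.
final class RefCounted[A](val value: A, onRelease: A => Unit) {
  private val refs = new AtomicInteger(1)

  // A clone shares the same value and increments the reference count.
  def retain(): RefCounted[A] = {
    refs.incrementAndGet()
    this
  }

  // Releasing decrements the count; the last release frees the resource.
  def release(): Unit = {
    if (refs.decrementAndGet() == 0) onRelease(value)
  }
}

object RefCountedDemo {
  def main(args: Array[String]): Unit = {
    var freed = false
    val original = new RefCounted[String]("cache-block-test", _ => freed = true)
    val cloned = original.retain() // "cloning" a session shares the block

    // Both holders see the same underlying cached value (no copy was made),
    // mirroring the suite's check that both managers report the same block ID.
    assert(original.value eq cloned.value)

    original.release() // first session closes: block survives
    assert(!freed)
    cloned.release()   // last session closes: block is freed
    assert(freed)
  }
}

Under this pattern, the suite's assertion that both artifact managers return the same cached block ID is the observable consequence of sharing rather than copying.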
