diff --git a/backends-velox/src/test/scala/org/apache/gluten/config/AllVeloxConfiguration.scala b/backends-velox/src/test/scala/org/apache/gluten/config/AllVeloxConfiguration.scala index 65059972b97f..5d8ba2b397a6 100644 --- a/backends-velox/src/test/scala/org/apache/gluten/config/AllVeloxConfiguration.scala +++ b/backends-velox/src/test/scala/org/apache/gluten/config/AllVeloxConfiguration.scala @@ -42,8 +42,8 @@ class AllVeloxConfiguration extends AnyFunSuite { s""" |## Gluten Velox backend configurations | - | Key | Default | Description - | --- | --- | --- + | Key | Status | Default | Description + | --- | --- | --- | --- |""" VeloxConfig.allEntries @@ -53,7 +53,11 @@ class AllVeloxConfiguration extends AnyFunSuite { .foreach { entry => val dft = entry.defaultValueString.replace("<", "<").replace(">", ">") - builder += Seq(s"${entry.key}", s"$dft", s"${entry.doc}") + builder += Seq( + s"${entry.key}", + AllGlutenConfiguration.configStatus(entry), + s"$dft", + s"${entry.doc}") .mkString("|") } @@ -61,8 +65,8 @@ class AllVeloxConfiguration extends AnyFunSuite { s""" |## Gluten Velox backend *experimental* configurations | - | Key | Default | Description - | --- | --- | --- + | Key | Status | Default | Description + | --- | --- | --- | --- |""" VeloxConfig.allEntries @@ -72,7 +76,11 @@ class AllVeloxConfiguration extends AnyFunSuite { .foreach { entry => val dft = entry.defaultValueString.replace("<", "<").replace(">", ">") - builder += Seq(s"${entry.key}", s"$dft", s"${entry.doc}") + builder += Seq( + s"${entry.key}", + AllGlutenConfiguration.configStatus(entry), + s"$dft", + s"${entry.doc}") .mkString("|") } diff --git a/docs/Configuration.md b/docs/Configuration.md index b32941150554..1c903369f25b 100644 --- a/docs/Configuration.md +++ b/docs/Configuration.md @@ -9,152 +9,152 @@ nav_order: 15 ## Spark base configurations for Gluten plugin -| Key | Recommend Setting | Description | 
-|-------------------------------|------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| spark.plugins | org.apache.gluten.GlutenPlugin | To load Gluten's components by Spark's plug-in loader. | -| spark.memory.offHeap.enabled | true | Gluten use off-heap memory for certain operations. | -| spark.memory.offHeap.size | 30G | The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified.
Note: Gluten Plugin will leverage this setting to allocate memory space for native usage even offHeap is disabled.
The value is based on your system and it is recommended to set it larger if you are facing Out of Memory issue in Gluten Plugin. | -| spark.shuffle.manager | org.apache.spark.shuffle.sort.ColumnarShuffleManager | To turn on Gluten Columnar Shuffle Plugin. | -| spark.driver.extraClassPath | /path/to/gluten_jar_file | Gluten Plugin jar file to prepend to the classpath of the driver. | -| spark.executor.extraClassPath | /path/to/gluten_jar_file | Gluten Plugin jar file to prepend to the classpath of executors. | +| Key | Status | Recommend Setting | Description | +|-------------------------------|----------|------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| spark.plugins | ⚓ Static | org.apache.gluten.GlutenPlugin | To load Gluten's components by Spark's plug-in loader. | +| spark.memory.offHeap.enabled | ⚓ Static | true | Gluten use off-heap memory for certain operations. | +| spark.memory.offHeap.size | ⚓ Static | 30G | The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified.
Note: Gluten Plugin will leverage this setting to allocate memory space for native usage even if offHeap is disabled.
The value is based on your system and it is recommended to set it larger if you are facing an Out of Memory issue in Gluten Plugin. | +| spark.shuffle.manager | ⚓ Static | org.apache.spark.shuffle.sort.ColumnarShuffleManager | To turn on Gluten Columnar Shuffle Plugin. | +| spark.driver.extraClassPath | ⚓ Static | /path/to/gluten_jar_file | Gluten Plugin jar file to prepend to the classpath of the driver. | +| spark.executor.extraClassPath | ⚓ Static | /path/to/gluten_jar_file | Gluten Plugin jar file to prepend to the classpath of executors. | ## Gluten configurations -| Key | Default | Description | -|--------------------------------------------------------------------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| spark.gluten.costModel | legacy | The class name of user-defined cost model that will be used by Gluten's transition planner. If not specified, a legacy built-in cost model will be used. The legacy cost model helps RAS planner exhaustively offload computations, and helps transition planner choose columnar-to-columnar transition over others. | -| spark.gluten.enabled | true | Whether to enable gluten. Default value is true. Just an experimental property. Recommend to enable/disable Gluten through the setting for spark.plugins. | -| spark.gluten.execution.resource.expired.time | 86400 | Expired time of execution with resource relation has cached. | -| spark.gluten.expression.blacklist | <undefined> | A black list of expression to skip transform, multiple values separated by commas. | -| spark.gluten.loadLibFromJar | false | Whether to load shared libraries from jars. 
| -| spark.gluten.loadLibOS | <undefined> | The shared library loader's OS name. | -| spark.gluten.loadLibOSVersion | <undefined> | The shared library loader's OS version. | -| spark.gluten.memory.isolation | false | Enable isolated memory mode. If true, Gluten controls the maximum off-heap memory can be used by each task to X, X = executor memory / max task slots. It's recommended to set true if Gluten serves concurrent queries within a single session, since not all memory Gluten allocated is guaranteed to be spillable. In the case, the feature should be enabled to avoid OOM. | -| spark.gluten.memory.overAcquiredMemoryRatio | 0.3 | If larger than 0, Velox backend will try over-acquire this ratio of the total allocated memory as backup to avoid OOM. | -| spark.gluten.memory.reservationBlockSize | 8MB | Block size of native reservation listener reserve memory from Spark. | -| spark.gluten.numTaskSlotsPerExecutor | -1 | Must provide default value since non-execution operations (e.g. org.apache.spark.sql.Dataset#summary) doesn't propagate configurations using org.apache.spark.sql.execution.SQLExecution#withSQLConfPropagated | -| spark.gluten.shuffleWriter.bufferSize | <undefined> | -| spark.gluten.soft-affinity.duplicateReading.maxCacheItems | 10000 | Enable Soft Affinity duplicate reading detection | -| spark.gluten.soft-affinity.duplicateReadingDetect.enabled | false | If true, Enable Soft Affinity duplicate reading detection | -| spark.gluten.soft-affinity.enabled | false | Whether to enable Soft Affinity scheduling. 
| -| spark.gluten.soft-affinity.min.target-hosts | 1 | For on HDFS, if there are already target hosts, and then prefer to use the original target hosts to schedule | -| spark.gluten.soft-affinity.replications.num | 2 | Calculate the number of the replications for scheduling to the target executors per file | -| spark.gluten.sql.adaptive.costEvaluator.enabled | true | If true, use org.apache.spark.sql.execution.adaptive.GlutenCostEvaluator as custom cost evaluator class, else follow the configuration spark.sql.adaptive.customCostEvaluatorClass. | -| spark.gluten.sql.ansiFallback.enabled | true | When true (default), Gluten will fall back to Spark when ANSI mode is enabled. When false, Gluten will attempt to execute in ANSI mode. | -| spark.gluten.sql.broadcastNestedLoopJoinTransformerEnabled | true | Config to enable BroadcastNestedLoopJoinExecTransformer. | -| spark.gluten.sql.cacheWholeStageTransformerContext | false | When true, `WholeStageTransformer` will cache the `WholeStageTransformerContext` when executing. It is used to get substrait plan node and native plan string. | -| spark.gluten.sql.cartesianProductTransformerEnabled | true | Config to enable CartesianProductExecTransformer. | -| spark.gluten.sql.collapseGetJsonObject.enabled | false | Collapse nested get_json_object functions as one for optimization. | -| spark.gluten.sql.columnar.appendData | true | Enable or disable columnar v2 command append data. | -| spark.gluten.sql.columnar.arrowUdf | true | Enable or disable columnar arrow udf. | -| spark.gluten.sql.columnar.batchscan | true | Enable or disable columnar batchscan. | -| spark.gluten.sql.columnar.broadcastExchange | true | Enable or disable columnar broadcastExchange. | -| spark.gluten.sql.columnar.broadcastJoin | true | Enable or disable columnar broadcastJoin. | -| spark.gluten.sql.columnar.cast.avg | true | -| spark.gluten.sql.columnar.coalesce | true | Enable or disable columnar coalesce. 
| -| spark.gluten.sql.columnar.collectLimit | true | Enable or disable columnar collectLimit. | -| spark.gluten.sql.columnar.collectTail | true | Enable or disable columnar collectTail. | -| spark.gluten.sql.columnar.enableNestedColumnPruningInHiveTableScan | true | Enable or disable nested column pruning in hivetablescan. | -| spark.gluten.sql.columnar.enableVanillaVectorizedReaders | true | Enable or disable vanilla vectorized scan. | -| spark.gluten.sql.columnar.executor.libpath || The gluten executor library path. | -| spark.gluten.sql.columnar.expand | true | Enable or disable columnar expand. | -| spark.gluten.sql.columnar.fallback.expressions.threshold | 50 | Fall back filter/project if number of nested expressions reaches this threshold, considering Spark codegen can bring better performance for such case. | -| spark.gluten.sql.columnar.fallback.ignoreRowToColumnar | true | When true, the fallback policy ignores the RowToColumnar when counting fallback number. | -| spark.gluten.sql.columnar.fallback.preferColumnar | true | When true, the fallback policy prefers to use Gluten plan rather than vanilla Spark plan if the both of them contains ColumnarToRow and the vanilla Spark plan ColumnarToRow number is not smaller than Gluten plan. | -| spark.gluten.sql.columnar.filescan | true | Enable or disable columnar filescan. | -| spark.gluten.sql.columnar.filter | true | Enable or disable columnar filter. | -| spark.gluten.sql.columnar.force.hashagg | true | Whether to force to use gluten's hash agg for replacing vanilla spark's sort agg. | -| spark.gluten.sql.columnar.forceShuffledHashJoin | true | -| spark.gluten.sql.columnar.generate | true | -| spark.gluten.sql.columnar.hashagg | true | Enable or disable columnar hashagg. | -| spark.gluten.sql.columnar.hivetablescan | true | Enable or disable columnar hivetablescan. | -| spark.gluten.sql.columnar.libname | gluten | The gluten library name. | -| spark.gluten.sql.columnar.libpath || The gluten library path. 
| -| spark.gluten.sql.columnar.limit | true | -| spark.gluten.sql.columnar.maxBatchSize | 4096 | -| spark.gluten.sql.columnar.overwriteByExpression | true | Enable or disable columnar v2 command overwrite by expression. | -| spark.gluten.sql.columnar.overwritePartitionsDynamic | true | Enable or disable columnar v2 command overwrite partitions dynamic. | -| spark.gluten.sql.columnar.parquet.write.blockSize | 128MB | -| spark.gluten.sql.columnar.partial.generate | true | Evaluates the non-offload-able HiveUDTF using vanilla Spark generator | -| spark.gluten.sql.columnar.partial.project | true | Break up one project node into 2 phases when some of the expressions are non offload-able. Phase one is a regular offloaded project transformer that evaluates the offload-able expressions in native, phase two preserves the output from phase one and evaluates the remaining non-offload-able expressions using vanilla Spark projections | -| spark.gluten.sql.columnar.physicalJoinOptimizationLevel | 12 | Fallback to row operators if there are several continuous joins. | -| spark.gluten.sql.columnar.physicalJoinOptimizationOutputSize | 52 | Fallback to row operators if there are several continuous joins and matched output size. | -| spark.gluten.sql.columnar.physicalJoinOptimizeEnable | false | Enable or disable columnar physicalJoinOptimize. | -| spark.gluten.sql.columnar.preferStreamingAggregate | true | Velox backend supports `StreamingAggregate`. `StreamingAggregate` uses the less memory as it does not need to hold all groups in memory, so it could avoid spill. When true and the child output ordering satisfies the grouping key then Gluten will choose `StreamingAggregate` as the native operator. | -| spark.gluten.sql.columnar.project | true | Enable or disable columnar project. 
| -| spark.gluten.sql.columnar.project.collapse | true | Combines two columnar project operators into one and perform alias substitution | -| spark.gluten.sql.columnar.query.fallback.threshold | -1 | The threshold for whether query will fall back by counting the number of ColumnarToRow & vanilla leaf node. | -| spark.gluten.sql.columnar.range | true | Enable or disable columnar range. | -| spark.gluten.sql.columnar.replaceData | true | Enable or disable columnar v2 command replace data. | -| spark.gluten.sql.columnar.scanOnly | false | When enabled, only scan and the filter after scan will be offloaded to native. | -| spark.gluten.sql.columnar.shuffle | true | Enable or disable columnar shuffle. | -| spark.gluten.sql.columnar.shuffle.celeborn.fallback.enabled | true | If enabled, fall back to ColumnarShuffleManager when celeborn service is unavailable.Otherwise, throw an exception. | -| spark.gluten.sql.columnar.shuffle.celeborn.useRssSort | true | If true, use RSS sort implementation for Celeborn sort-based shuffle.If false, use Gluten's row-based sort implementation. Only valid when `spark.celeborn.client.spark.shuffle.writer` is set to `sort`. | -| spark.gluten.sql.columnar.shuffle.codec | <undefined> | By default, the supported codecs are lz4 and zstd. When spark.gluten.sql.columnar.shuffle.codecBackend=qat,the supported codecs are gzip and zstd. | -| spark.gluten.sql.columnar.shuffle.codecBackend | <undefined> | -| spark.gluten.sql.columnar.shuffle.compression.threshold | 100 | If number of rows in a batch falls below this threshold, will copy all buffers into one buffer to compress. | -| spark.gluten.sql.columnar.shuffle.dictionary.enabled | false | Enable dictionary in hash-based shuffle. | -| spark.gluten.sql.columnar.shuffle.merge.threshold | 0.25 | -| spark.gluten.sql.columnar.shuffle.readerBufferSize | 1MB | Buffer size in bytes for shuffle reader reading input stream from local or remote. 
| -| spark.gluten.sql.columnar.shuffle.realloc.threshold | 0.25 | -| spark.gluten.sql.columnar.shuffle.sort.columns.threshold | 100000 | The threshold to determine whether to use sort-based columnar shuffle. Sort-based shuffle will be used if the number of columns is greater than this threshold. | -| spark.gluten.sql.columnar.shuffle.sort.deserializerBufferSize | 1MB | Buffer size in bytes for sort-based shuffle reader deserializing raw input to columnar batch. | -| spark.gluten.sql.columnar.shuffle.sort.partitions.threshold | 4000 | The threshold to determine whether to use sort-based columnar shuffle. Sort-based shuffle will be used if the number of partitions is greater than this threshold. | -| spark.gluten.sql.columnar.shuffle.typeAwareCompress.enabled | false | Enable type-aware compression (e.g. FFor for 64-bit integers) in shuffle. Not compatible with dictionary encoding; if both are enabled, type-aware compression is automatically disabled. | -| spark.gluten.sql.columnar.shuffledHashJoin | true | Enable or disable columnar shuffledHashJoin. | -| spark.gluten.sql.columnar.shuffledHashJoin.optimizeBuildSide | true | Whether to allow Gluten to choose an optimal build side for shuffled hash join. | -| spark.gluten.sql.columnar.smallFileThreshold | 0.5 | The total size threshold of small files in table scan.To avoid small files being placed into the same partition, Gluten will try to distribute small files into different partitions when the total size of small files is below this threshold. | -| spark.gluten.sql.columnar.sort | true | Enable or disable columnar sort. | -| spark.gluten.sql.columnar.sortMergeJoin | true | Enable or disable columnar sortMergeJoin. This should be set with preferSortMergeJoin=false. | -| spark.gluten.sql.columnar.tableCache | false | Enable or disable columnar table cache. | -| spark.gluten.sql.columnar.takeOrderedAndProject | true | -| spark.gluten.sql.columnar.union | true | Enable or disable columnar union. 
| -| spark.gluten.sql.columnar.wholeStage.fallback.threshold | -1 | The threshold for whether whole stage will fall back in AQE supported case by counting the number of ColumnarToRow & vanilla leaf node. | -| spark.gluten.sql.columnar.window | true | Enable or disable columnar window. | -| spark.gluten.sql.columnar.window.group.limit | true | Enable or disable columnar window group limit. | -| spark.gluten.sql.columnar.writeToDataSourceV2 | true | Enable or disable columnar v2 command write to data source v2. | -| spark.gluten.sql.columnarSampleEnabled | false | Disable or enable columnar sample. | -| spark.gluten.sql.columnarToRowMemoryThreshold | 64MB | -| spark.gluten.sql.countDistinctWithoutExpand | false | Convert Count Distinct to a UDAF called count_distinct to prevent SparkPlanner converting it to Expand+Count. WARNING: When enabled, count distinct queries will fail to fallback!!! | -| spark.gluten.sql.extendedColumnPruning.enabled | true | Do extended nested column pruning for cases ignored by vanilla Spark. | -| spark.gluten.sql.fallbackRegexpExpressions | false | If true, fall back all regexp expressions. There are a few incompatible cases between RE2 (used by native engine) and java.util.regex (used by Spark). User should enable this property if their incompatibility is intolerable. | -| spark.gluten.sql.fallbackUnexpectedMetadataParquet | false | If enabled, Gluten will not offload scan when unexpected metadata is detected. | -| spark.gluten.sql.fallbackUnexpectedMetadataParquet.limit | 10 | If supplied, metadata of `limit` number of Parquet files will be checked to determine whether to fall back to java scan. | -| spark.gluten.sql.injectNativePlanStringToExplain | false | When true, Gluten will inject native plan tree to Spark's explain output. | -| spark.gluten.sql.mergeTwoPhasesAggregate.enabled | true | Whether to merge two phases aggregate if there are no other operators between them. 
| -| spark.gluten.sql.native.arrow.reader.enabled | false | This is config to specify whether to enable the native columnar csv reader | -| spark.gluten.sql.native.bloomFilter | true | -| spark.gluten.sql.native.hive.writer.enabled | true | This is config to specify whether to enable the native columnar writer for HiveFileFormat. Currently only supports HiveFileFormat with Parquet as the output file type. | -| spark.gluten.sql.native.hyperLogLog.Aggregate | true | -| spark.gluten.sql.native.parquet.write.blockRows | 100000000 | -| spark.gluten.sql.native.union | false | Enable or disable native union where computation is completely offloaded to backend. | -| spark.gluten.sql.native.writeColumnMetadataExclusionList | comment | Native write files does not support column metadata. Metadata in list would be removed to support native write files. Multiple values separated by commas. | -| spark.gluten.sql.native.writer.enabled | <undefined> | This is config to specify whether to enable the native columnar parquet/orc writer | -| spark.gluten.sql.orc.charType.scan.fallback.enabled | true | Force fallback for orc char type scan. | -| spark.gluten.sql.pushAggregateThroughJoin.enabled | false | Enables the push-aggregate-through-join optimization in Gluten. When enabled, aggregate operators may be pushed below joins during logical optimization and corresponding physical plans may be rewritten to execute the aggregation earlier. | -| spark.gluten.sql.pushAggregateThroughJoin.maxDepth | 2147483647 | Maximum join traversal depth when applying the push-aggregate-through-join optimization. A value of 1 allows pushing an aggregate through a single join; larger values allow the rule to traverse and push through multiple consecutive joins. | -| spark.gluten.sql.removeNativeWriteFilesSortAndProject | true | When true, Gluten will remove the vanilla Spark V1Writes added sort and project for velox backend. 
| -| spark.gluten.sql.rewrite.dateTimestampComparison | true | Rewrite the comparision between date and timestamp to timestamp comparison.For example `from_unixtime(ts) > date` will be rewritten to `ts > to_unixtime(date)` | -| spark.gluten.sql.scan.fileSchemeValidation.enabled | true | When true, enable file path scheme validation for scan. Validation will fail if file scheme is not supported by registered file systems, which will cause scan operator fall back. | -| spark.gluten.sql.supported.flattenNestedFunctions | and,or | Flatten nested functions as one for optimization. | -| spark.gluten.sql.text.input.empty.as.default | false | treat empty fields in CSV input as default values. | -| spark.gluten.sql.text.input.max.block.size | 8KB | the max block size for text input rows | -| spark.gluten.sql.validation.printStackOnFailure | false | -| spark.gluten.storage.hdfsViewfs.enabled | false | If enabled, gluten will convert the viewfs path to hdfs path in scala side | -| spark.gluten.supported.hive.udfs || Supported hive udf names. | -| spark.gluten.supported.python.udfs || Supported python udf names. | -| spark.gluten.supported.scala.udfs || Supported scala udf names. | -| spark.gluten.ui.enabled | true | Whether to enable the gluten web UI, If true, attach the gluten UI page to the Spark web UI. | +| Key | Status | Default | Description | +|--------------------------------------------------------------------|------------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| spark.gluten.costModel | 🔄 Dynamic | legacy | The class name of user-defined cost model that will be used by Gluten's transition planner. 
If not specified, a legacy built-in cost model will be used. The legacy cost model helps RAS planner exhaustively offload computations, and helps transition planner choose columnar-to-columnar transition over others. | +| spark.gluten.enabled | 🔄 Dynamic | true | Whether to enable gluten. Default value is true. Just an experimental property. Recommend to enable/disable Gluten through the setting for spark.plugins. | +| spark.gluten.execution.resource.expired.time | 🔄 Dynamic | 86400 | Expired time of execution with resource relation has cached. | +| spark.gluten.expression.blacklist | 🔄 Dynamic | <undefined> | A black list of expression to skip transform, multiple values separated by commas. | +| spark.gluten.loadLibFromJar | 🔄 Dynamic | false | Whether to load shared libraries from jars. | +| spark.gluten.loadLibOS | 🔄 Dynamic | <undefined> | The shared library loader's OS name. | +| spark.gluten.loadLibOSVersion | 🔄 Dynamic | <undefined> | The shared library loader's OS version. | +| spark.gluten.memory.isolation | 🔄 Dynamic | false | Enable isolated memory mode. If true, Gluten controls the maximum off-heap memory can be used by each task to X, X = executor memory / max task slots. It's recommended to set true if Gluten serves concurrent queries within a single session, since not all memory Gluten allocated is guaranteed to be spillable. In the case, the feature should be enabled to avoid OOM. | +| spark.gluten.memory.overAcquiredMemoryRatio | 🔄 Dynamic | 0.3 | If larger than 0, Velox backend will try over-acquire this ratio of the total allocated memory as backup to avoid OOM. | +| spark.gluten.memory.reservationBlockSize | 🔄 Dynamic | 8MB | Block size of native reservation listener reserve memory from Spark. | +| spark.gluten.numTaskSlotsPerExecutor | 🔄 Dynamic | -1 | Must provide default value since non-execution operations (e.g. 
org.apache.spark.sql.Dataset#summary) doesn't propagate configurations using org.apache.spark.sql.execution.SQLExecution#withSQLConfPropagated | +| spark.gluten.shuffleWriter.bufferSize | 🔄 Dynamic | <undefined> | +| spark.gluten.soft-affinity.duplicateReading.maxCacheItems | 🔄 Dynamic | 10000 | Enable Soft Affinity duplicate reading detection | +| spark.gluten.soft-affinity.duplicateReadingDetect.enabled | 🔄 Dynamic | false | If true, Enable Soft Affinity duplicate reading detection | +| spark.gluten.soft-affinity.enabled | 🔄 Dynamic | false | Whether to enable Soft Affinity scheduling. | +| spark.gluten.soft-affinity.min.target-hosts | 🔄 Dynamic | 1 | For on HDFS, if there are already target hosts, and then prefer to use the original target hosts to schedule | +| spark.gluten.soft-affinity.replications.num | 🔄 Dynamic | 2 | Calculate the number of the replications for scheduling to the target executors per file | +| spark.gluten.sql.adaptive.costEvaluator.enabled | ⚓ Static | true | If true, use org.apache.spark.sql.execution.adaptive.GlutenCostEvaluator as custom cost evaluator class, else follow the configuration spark.sql.adaptive.customCostEvaluatorClass. | +| spark.gluten.sql.ansiFallback.enabled | 🔄 Dynamic | true | When true (default), Gluten will fall back to Spark when ANSI mode is enabled. When false, Gluten will attempt to execute in ANSI mode. | +| spark.gluten.sql.broadcastNestedLoopJoinTransformerEnabled | 🔄 Dynamic | true | Config to enable BroadcastNestedLoopJoinExecTransformer. | +| spark.gluten.sql.cacheWholeStageTransformerContext | 🔄 Dynamic | false | When true, `WholeStageTransformer` will cache the `WholeStageTransformerContext` when executing. It is used to get substrait plan node and native plan string. | +| spark.gluten.sql.cartesianProductTransformerEnabled | 🔄 Dynamic | true | Config to enable CartesianProductExecTransformer. 
| +| spark.gluten.sql.collapseGetJsonObject.enabled | 🔄 Dynamic | false | Collapse nested get_json_object functions as one for optimization. | +| spark.gluten.sql.columnar.appendData | 🔄 Dynamic | true | Enable or disable columnar v2 command append data. | +| spark.gluten.sql.columnar.arrowUdf | 🔄 Dynamic | true | Enable or disable columnar arrow udf. | +| spark.gluten.sql.columnar.batchscan | 🔄 Dynamic | true | Enable or disable columnar batchscan. | +| spark.gluten.sql.columnar.broadcastExchange | 🔄 Dynamic | true | Enable or disable columnar broadcastExchange. | +| spark.gluten.sql.columnar.broadcastJoin | 🔄 Dynamic | true | Enable or disable columnar broadcastJoin. | +| spark.gluten.sql.columnar.cast.avg | 🔄 Dynamic | true | +| spark.gluten.sql.columnar.coalesce | 🔄 Dynamic | true | Enable or disable columnar coalesce. | +| spark.gluten.sql.columnar.collectLimit | 🔄 Dynamic | true | Enable or disable columnar collectLimit. | +| spark.gluten.sql.columnar.collectTail | 🔄 Dynamic | true | Enable or disable columnar collectTail. | +| spark.gluten.sql.columnar.enableNestedColumnPruningInHiveTableScan | 🔄 Dynamic | true | Enable or disable nested column pruning in hivetablescan. | +| spark.gluten.sql.columnar.enableVanillaVectorizedReaders | ⚓ Static | true | Enable or disable vanilla vectorized scan. | +| spark.gluten.sql.columnar.executor.libpath | 🔄 Dynamic || The gluten executor library path. | +| spark.gluten.sql.columnar.expand | 🔄 Dynamic | true | Enable or disable columnar expand. | +| spark.gluten.sql.columnar.fallback.expressions.threshold | 🔄 Dynamic | 50 | Fall back filter/project if number of nested expressions reaches this threshold, considering Spark codegen can bring better performance for such case. | +| spark.gluten.sql.columnar.fallback.ignoreRowToColumnar | 🔄 Dynamic | true | When true, the fallback policy ignores the RowToColumnar when counting fallback number. 
| +| spark.gluten.sql.columnar.fallback.preferColumnar | 🔄 Dynamic | true | When true, the fallback policy prefers to use Gluten plan rather than vanilla Spark plan if the both of them contains ColumnarToRow and the vanilla Spark plan ColumnarToRow number is not smaller than Gluten plan. | +| spark.gluten.sql.columnar.filescan | 🔄 Dynamic | true | Enable or disable columnar filescan. | +| spark.gluten.sql.columnar.filter | 🔄 Dynamic | true | Enable or disable columnar filter. | +| spark.gluten.sql.columnar.force.hashagg | 🔄 Dynamic | true | Whether to force to use gluten's hash agg for replacing vanilla spark's sort agg. | +| spark.gluten.sql.columnar.forceShuffledHashJoin | 🔄 Dynamic | true | +| spark.gluten.sql.columnar.generate | 🔄 Dynamic | true | +| spark.gluten.sql.columnar.hashagg | 🔄 Dynamic | true | Enable or disable columnar hashagg. | +| spark.gluten.sql.columnar.hivetablescan | 🔄 Dynamic | true | Enable or disable columnar hivetablescan. | +| spark.gluten.sql.columnar.libname | 🔄 Dynamic | gluten | The gluten library name. | +| spark.gluten.sql.columnar.libpath | 🔄 Dynamic || The gluten library path. | +| spark.gluten.sql.columnar.limit | 🔄 Dynamic | true | +| spark.gluten.sql.columnar.maxBatchSize | 🔄 Dynamic | 4096 | +| spark.gluten.sql.columnar.overwriteByExpression | 🔄 Dynamic | true | Enable or disable columnar v2 command overwrite by expression. | +| spark.gluten.sql.columnar.overwritePartitionsDynamic | 🔄 Dynamic | true | Enable or disable columnar v2 command overwrite partitions dynamic. | +| spark.gluten.sql.columnar.parquet.write.blockSize | 🔄 Dynamic | 128MB | +| spark.gluten.sql.columnar.partial.generate | 🔄 Dynamic | true | Evaluates the non-offload-able HiveUDTF using vanilla Spark generator | +| spark.gluten.sql.columnar.partial.project | 🔄 Dynamic | true | Break up one project node into 2 phases when some of the expressions are non offload-able. 
Phase one is a regular offloaded project transformer that evaluates the offload-able expressions in native, phase two preserves the output from phase one and evaluates the remaining non-offload-able expressions using vanilla Spark projections | +| spark.gluten.sql.columnar.physicalJoinOptimizationLevel | 🔄 Dynamic | 12 | Fallback to row operators if there are several continuous joins. | +| spark.gluten.sql.columnar.physicalJoinOptimizationOutputSize | 🔄 Dynamic | 52 | Fallback to row operators if there are several continuous joins and matched output size. | +| spark.gluten.sql.columnar.physicalJoinOptimizeEnable | 🔄 Dynamic | false | Enable or disable columnar physicalJoinOptimize. | +| spark.gluten.sql.columnar.preferStreamingAggregate | 🔄 Dynamic | true | Velox backend supports `StreamingAggregate`. `StreamingAggregate` uses the less memory as it does not need to hold all groups in memory, so it could avoid spill. When true and the child output ordering satisfies the grouping key then Gluten will choose `StreamingAggregate` as the native operator. | +| spark.gluten.sql.columnar.project | 🔄 Dynamic | true | Enable or disable columnar project. | +| spark.gluten.sql.columnar.project.collapse | 🔄 Dynamic | true | Combines two columnar project operators into one and perform alias substitution | +| spark.gluten.sql.columnar.query.fallback.threshold | 🔄 Dynamic | -1 | The threshold for whether query will fall back by counting the number of ColumnarToRow & vanilla leaf node. | +| spark.gluten.sql.columnar.range | 🔄 Dynamic | true | Enable or disable columnar range. | +| spark.gluten.sql.columnar.replaceData | 🔄 Dynamic | true | Enable or disable columnar v2 command replace data. | +| spark.gluten.sql.columnar.scanOnly | 🔄 Dynamic | false | When enabled, only scan and the filter after scan will be offloaded to native. | +| spark.gluten.sql.columnar.shuffle | 🔄 Dynamic | true | Enable or disable columnar shuffle. 
| +| spark.gluten.sql.columnar.shuffle.celeborn.fallback.enabled | ⚓ Static | true | If enabled, fall back to ColumnarShuffleManager when celeborn service is unavailable. Otherwise, throw an exception. | +| spark.gluten.sql.columnar.shuffle.celeborn.useRssSort | 🔄 Dynamic | true | If true, use RSS sort implementation for Celeborn sort-based shuffle. If false, use Gluten's row-based sort implementation. Only valid when `spark.celeborn.client.spark.shuffle.writer` is set to `sort`. | +| spark.gluten.sql.columnar.shuffle.codec | 🔄 Dynamic | <undefined> | By default, the supported codecs are lz4 and zstd. When spark.gluten.sql.columnar.shuffle.codecBackend=qat, the supported codecs are gzip and zstd. | +| spark.gluten.sql.columnar.shuffle.codecBackend | 🔄 Dynamic | <undefined> | +| spark.gluten.sql.columnar.shuffle.compression.threshold | 🔄 Dynamic | 100 | If number of rows in a batch falls below this threshold, will copy all buffers into one buffer to compress. | +| spark.gluten.sql.columnar.shuffle.dictionary.enabled | 🔄 Dynamic | false | Enable dictionary in hash-based shuffle. | +| spark.gluten.sql.columnar.shuffle.merge.threshold | 🔄 Dynamic | 0.25 | +| spark.gluten.sql.columnar.shuffle.readerBufferSize | 🔄 Dynamic | 1MB | Buffer size in bytes for shuffle reader reading input stream from local or remote. | +| spark.gluten.sql.columnar.shuffle.realloc.threshold | 🔄 Dynamic | 0.25 | +| spark.gluten.sql.columnar.shuffle.sort.columns.threshold | 🔄 Dynamic | 100000 | The threshold to determine whether to use sort-based columnar shuffle. Sort-based shuffle will be used if the number of columns is greater than this threshold. | +| spark.gluten.sql.columnar.shuffle.sort.deserializerBufferSize | 🔄 Dynamic | 1MB | Buffer size in bytes for sort-based shuffle reader deserializing raw input to columnar batch. | +| spark.gluten.sql.columnar.shuffle.sort.partitions.threshold | 🔄 Dynamic | 4000 | The threshold to determine whether to use sort-based columnar shuffle. 
Sort-based shuffle will be used if the number of partitions is greater than this threshold. | +| spark.gluten.sql.columnar.shuffle.typeAwareCompress.enabled | 🔄 Dynamic | false | Enable type-aware compression (e.g. FFor for 64-bit integers) in shuffle. Not compatible with dictionary encoding; if both are enabled, type-aware compression is automatically disabled. | +| spark.gluten.sql.columnar.shuffledHashJoin | 🔄 Dynamic | true | Enable or disable columnar shuffledHashJoin. | +| spark.gluten.sql.columnar.shuffledHashJoin.optimizeBuildSide | 🔄 Dynamic | true | Whether to allow Gluten to choose an optimal build side for shuffled hash join. | +| spark.gluten.sql.columnar.smallFileThreshold | 🔄 Dynamic | 0.5 | The total size threshold of small files in table scan. To avoid small files being placed into the same partition, Gluten will try to distribute small files into different partitions when the total size of small files is below this threshold. | +| spark.gluten.sql.columnar.sort | 🔄 Dynamic | true | Enable or disable columnar sort. | +| spark.gluten.sql.columnar.sortMergeJoin | 🔄 Dynamic | true | Enable or disable columnar sortMergeJoin. This should be set with preferSortMergeJoin=false. | +| spark.gluten.sql.columnar.tableCache | ⚓ Static | false | Enable or disable columnar table cache. | +| spark.gluten.sql.columnar.takeOrderedAndProject | 🔄 Dynamic | true | +| spark.gluten.sql.columnar.union | 🔄 Dynamic | true | Enable or disable columnar union. | +| spark.gluten.sql.columnar.wholeStage.fallback.threshold | 🔄 Dynamic | -1 | The threshold for whether whole stage will fall back in AQE supported case by counting the number of ColumnarToRow & vanilla leaf node. | +| spark.gluten.sql.columnar.window | 🔄 Dynamic | true | Enable or disable columnar window. | +| spark.gluten.sql.columnar.window.group.limit | 🔄 Dynamic | true | Enable or disable columnar window group limit. 
| +| spark.gluten.sql.columnar.writeToDataSourceV2 | 🔄 Dynamic | true | Enable or disable columnar v2 command write to data source v2. | +| spark.gluten.sql.columnarSampleEnabled | 🔄 Dynamic | false | Disable or enable columnar sample. | +| spark.gluten.sql.columnarToRowMemoryThreshold | 🔄 Dynamic | 64MB | +| spark.gluten.sql.countDistinctWithoutExpand | 🔄 Dynamic | false | Convert Count Distinct to a UDAF called count_distinct to prevent SparkPlanner converting it to Expand+Count. WARNING: When enabled, count distinct queries will fail to fallback!!! | +| spark.gluten.sql.extendedColumnPruning.enabled | 🔄 Dynamic | true | Do extended nested column pruning for cases ignored by vanilla Spark. | +| spark.gluten.sql.fallbackRegexpExpressions | 🔄 Dynamic | false | If true, fall back all regexp expressions. There are a few incompatible cases between RE2 (used by native engine) and java.util.regex (used by Spark). User should enable this property if their incompatibility is intolerable. | +| spark.gluten.sql.fallbackUnexpectedMetadataParquet | 🔄 Dynamic | false | If enabled, Gluten will not offload scan when unexpected metadata is detected. | +| spark.gluten.sql.fallbackUnexpectedMetadataParquet.limit | 🔄 Dynamic | 10 | If supplied, metadata of `limit` number of Parquet files will be checked to determine whether to fall back to java scan. | +| spark.gluten.sql.injectNativePlanStringToExplain | 🔄 Dynamic | false | When true, Gluten will inject native plan tree to Spark's explain output. | +| spark.gluten.sql.mergeTwoPhasesAggregate.enabled | 🔄 Dynamic | true | Whether to merge two phases aggregate if there are no other operators between them. 
| +| spark.gluten.sql.native.arrow.reader.enabled | 🔄 Dynamic | false | This is config to specify whether to enable the native columnar csv reader | +| spark.gluten.sql.native.bloomFilter | 🔄 Dynamic | true | +| spark.gluten.sql.native.hive.writer.enabled | 🔄 Dynamic | true | This is config to specify whether to enable the native columnar writer for HiveFileFormat. Currently only supports HiveFileFormat with Parquet as the output file type. | +| spark.gluten.sql.native.hyperLogLog.Aggregate | 🔄 Dynamic | true | +| spark.gluten.sql.native.parquet.write.blockRows | 🔄 Dynamic | 100000000 | +| spark.gluten.sql.native.union | 🔄 Dynamic | false | Enable or disable native union where computation is completely offloaded to backend. | +| spark.gluten.sql.native.writeColumnMetadataExclusionList | 🔄 Dynamic | comment | Native write files does not support column metadata. Metadata in list would be removed to support native write files. Multiple values separated by commas. | +| spark.gluten.sql.native.writer.enabled | 🔄 Dynamic | <undefined> | This is config to specify whether to enable the native columnar parquet/orc writer | +| spark.gluten.sql.orc.charType.scan.fallback.enabled | 🔄 Dynamic | true | Force fallback for orc char type scan. | +| spark.gluten.sql.pushAggregateThroughJoin.enabled | 🔄 Dynamic | false | Enables the push-aggregate-through-join optimization in Gluten. When enabled, aggregate operators may be pushed below joins during logical optimization and corresponding physical plans may be rewritten to execute the aggregation earlier. | +| spark.gluten.sql.pushAggregateThroughJoin.maxDepth | 🔄 Dynamic | 2147483647 | Maximum join traversal depth when applying the push-aggregate-through-join optimization. A value of 1 allows pushing an aggregate through a single join; larger values allow the rule to traverse and push through multiple consecutive joins. 
| +| spark.gluten.sql.removeNativeWriteFilesSortAndProject | 🔄 Dynamic | true | When true, Gluten will remove the vanilla Spark V1Writes added sort and project for velox backend. | +| spark.gluten.sql.rewrite.dateTimestampComparison | 🔄 Dynamic | true | Rewrite the comparison between date and timestamp to timestamp comparison. For example `from_unixtime(ts) > date` will be rewritten to `ts > to_unixtime(date)` | +| spark.gluten.sql.scan.fileSchemeValidation.enabled | 🔄 Dynamic | true | When true, enable file path scheme validation for scan. Validation will fail if file scheme is not supported by registered file systems, which will cause scan operator fall back. | +| spark.gluten.sql.supported.flattenNestedFunctions | 🔄 Dynamic | and,or | Flatten nested functions as one for optimization. | +| spark.gluten.sql.text.input.empty.as.default | 🔄 Dynamic | false | treat empty fields in CSV input as default values. | +| spark.gluten.sql.text.input.max.block.size | 🔄 Dynamic | 8KB | the max block size for text input rows | +| spark.gluten.sql.validation.printStackOnFailure | 🔄 Dynamic | false | +| spark.gluten.storage.hdfsViewfs.enabled | ⚓ Static | false | If enabled, gluten will convert the viewfs path to hdfs path in scala side | +| spark.gluten.supported.hive.udfs | 🔄 Dynamic || Supported hive udf names. | +| spark.gluten.supported.python.udfs | 🔄 Dynamic || Supported python udf names. | +| spark.gluten.supported.scala.udfs | 🔄 Dynamic || Supported scala udf names. | +| spark.gluten.ui.enabled | ⚓ Static | true | Whether to enable the gluten web UI. If true, attach the gluten UI page to the Spark web UI. 
| ## Gluten *experimental* configurations -| Key | Default | Description | -|-------------------------------------------------------------------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| spark.gluten.auto.adjustStageResource.enabled | false | Experimental: If enabled, gluten will try to set the stage resource according to stage execution plan. Only worked when aqe is enabled at the same time!! | -| spark.gluten.auto.adjustStageResources.fallenNode.ratio.threshold | 0.5 | Experimental: Increase executor heap memory when stage contains fallen node count exceeds the total node count ratio. | -| spark.gluten.auto.adjustStageResources.heap.ratio | 2.0 | Experimental: Increase executor heap memory when match adjust stage resource rule. | -| spark.gluten.auto.adjustStageResources.offheap.ratio | 0.5 | Experimental: Decrease executor offheap memory when match adjust stage resource rule. | -| spark.gluten.memory.dynamic.offHeap.sizing.enabled | false | Experimental: When set to true, the offheap config (spark.memory.offHeap.size) will be ignored and instead we will consider onheap and offheap memory in combination, both counting towards the executor memory config (spark.executor.memory). 
We will make use of JVM APIs to determine how much onheap memory is use, alongside tracking offheap allocations made by Gluten. We will then proceed to enforcing a total memory quota, calculated by the sum of what memory is committed and in use in the Java heap. Since the calculation of the total quota happens as offheap allocation happens and not as JVM heap memory is allocated, it is possible that we can oversubscribe memory. Additionally, note that this change is experimental and may have performance implications. | -| spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction | 0.6 | Experimental: Determines the memory fraction used to determine the total memory available for offheap and onheap allocations when the dynamic offheap sizing feature is enabled. The default is set to match spark.executor.memoryFraction. | -| spark.gluten.sql.columnar.cudf | false | Enable or disable cudf support. This is an experimental feature. | +| Key | Status | Default | Description | +|-------------------------------------------------------------------|------------|---------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| spark.gluten.auto.adjustStageResource.enabled | 🔄 Dynamic | false | Experimental: If enabled, gluten will try to set the stage resource according to 
stage execution plan. Only worked when aqe is enabled at the same time!! | +| spark.gluten.auto.adjustStageResources.fallenNode.ratio.threshold | 🔄 Dynamic | 0.5 | Experimental: Increase executor heap memory when stage contains fallen node count exceeds the total node count ratio. | +| spark.gluten.auto.adjustStageResources.heap.ratio | 🔄 Dynamic | 2.0 | Experimental: Increase executor heap memory when match adjust stage resource rule. | +| spark.gluten.auto.adjustStageResources.offheap.ratio | 🔄 Dynamic | 0.5 | Experimental: Decrease executor offheap memory when match adjust stage resource rule. | +| spark.gluten.memory.dynamic.offHeap.sizing.enabled | ⚓ Static | false | Experimental: When set to true, the offheap config (spark.memory.offHeap.size) will be ignored and instead we will consider onheap and offheap memory in combination, both counting towards the executor memory config (spark.executor.memory). We will make use of JVM APIs to determine how much onheap memory is use, alongside tracking offheap allocations made by Gluten. We will then proceed to enforcing a total memory quota, calculated by the sum of what memory is committed and in use in the Java heap. Since the calculation of the total quota happens as offheap allocation happens and not as JVM heap memory is allocated, it is possible that we can oversubscribe memory. Additionally, note that this change is experimental and may have performance implications. | +| spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction | ⚓ Static | 0.6 | Experimental: Determines the memory fraction used to determine the total memory available for offheap and onheap allocations when the dynamic offheap sizing feature is enabled. The default is set to match spark.executor.memoryFraction. | +| spark.gluten.sql.columnar.cudf | 🔄 Dynamic | false | Enable or disable cudf support. This is an experimental feature. 
| diff --git a/docs/velox-configuration.md b/docs/velox-configuration.md index a608dfbc450b..bdcd20a6f0f7 100644 --- a/docs/velox-configuration.md +++ b/docs/velox-configuration.md @@ -9,85 +9,85 @@ nav_order: 16 ## Gluten Velox backend configurations -| Key | Default | Description | -|----------------------------------------------------------------------------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| spark.gluten.sql.columnar.backend.velox.IOThreads | <undefined> | The Size of the IO thread pool in the Connector. This thread pool is used for split preloading and DirectBufferedInput. By default, the value is the same as the maximum task slots per Spark executor. | -| spark.gluten.sql.columnar.backend.velox.SplitPreloadPerDriver | 2 | The split preload per task | -| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinPct | 90 | If partial aggregation aggregationPct greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinRows | 100000 | If partial aggregation input rows number greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. 
| -| spark.gluten.sql.columnar.backend.velox.asyncTimeoutOnTaskStopping | 30000ms | Timeout in milliseconds when waiting for runtime-scoped async work to finish during teardown. | -| spark.gluten.sql.columnar.backend.velox.cacheEnabled | false | Enable Velox cache, default off. It's recommended to enablesoft-affinity as well when enable velox cache. | -| spark.gluten.sql.columnar.backend.velox.cachePrefetchMinPct | 0 | Set prefetch cache min pct for velox file scan | -| spark.gluten.sql.columnar.backend.velox.checkUsageLeak | true | Enable check memory usage leak. | -| spark.gluten.sql.columnar.backend.velox.cudf.batchSize | 2147483647 | Cudf input batch size after shuffle reader | -| spark.gluten.sql.columnar.backend.velox.cudf.enableTableScan | false | Enable cudf table scan | -| spark.gluten.sql.columnar.backend.velox.cudf.enableValidation | true | Heuristics you can apply to validate a cuDF/GPU plan and only offload when the entire stage can be fully and profitably executed on GPU | -| spark.gluten.sql.columnar.backend.velox.cudf.memoryPercent | 50 | The initial percent of GPU memory to allocate for memory resource for one thread. | -| spark.gluten.sql.columnar.backend.velox.cudf.memoryResource | async | GPU RMM memory resource. | -| spark.gluten.sql.columnar.backend.velox.cudf.shuffleMaxPrefetchBytes | 1028MB | Maximum bytes to prefetch in CPU memory during GPU shuffle read while waiting for GPU available. | -| spark.gluten.sql.columnar.backend.velox.directorySizeGuess | 32KB | Deprecated, rename to spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | -| spark.gluten.sql.columnar.backend.velox.enableTimestampNtzValidation | true | Enable validation fallback for TimestampNTZ type. When true (default), any plan containing TimestampNTZ will fall back to Spark execution. Set to false during development/testing of TimestampNTZ support to allow native execution. 
| -| spark.gluten.sql.columnar.backend.velox.fileHandleCacheEnabled | false | Disables caching if false. File handle cache should be disabled if files are mutable, i.e. file content may change while file path stays the same. | -| spark.gluten.sql.columnar.backend.velox.filePreloadThreshold | 1MB | Set the file preload threshold for velox file scan, refer to Velox's file-preload-threshold | -| spark.gluten.sql.columnar.backend.velox.floatingPointMode | loose | Config used to control the tolerance of floating point operations alignment with Spark. When the mode is set to strict, flushing is disabled for sum(float/double)and avg(float/double). When set to loose, flushing will be enabled. | -| spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation | true | Enable flushable aggregation. If true, Gluten will try converting regular aggregation into Velox's flushable aggregation when applicable. A flushable aggregation could emit intermediate result at anytime when memory is full / data reduction ratio is low. | -| spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | 32KB | Set the footer estimated size for velox file scan, refer to Velox's footer-estimated-size | -| spark.gluten.sql.columnar.backend.velox.hashProbe.bloomFilterPushdown.maxSize | 0b | The maximum byte size of Bloom filter that can be generated from hash probe. When set to 0, no Bloom filter will be generated. To achieve optimal performance, this should not be too larger than the CPU cache size on the host. | -| spark.gluten.sql.columnar.backend.velox.hashProbe.dynamicFilterPushdown.enabled | true | Whether hash probe can generate any dynamic filter (including Bloom filter) and push down to upstream operators. | -| spark.gluten.sql.columnar.backend.velox.loadQuantum | 256MB | Set the load quantum for velox file scan, recommend to use the default value (256MB) for performance consideration. If Velox cache is enabled, it can be 8MB at most. 
| -| spark.gluten.sql.columnar.backend.velox.maxCoalescedBytes | 64MB | Set the max coalesced bytes for velox file scan | -| spark.gluten.sql.columnar.backend.velox.maxCoalescedDistance | 512KB | Set the max coalesced distance bytes for velox file scan | -| spark.gluten.sql.columnar.backend.velox.maxCompiledRegexes | 100 | Controls maximum number of compiled regular expression patterns per function instance per thread of execution. | -| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemory | <undefined> | Set the max extended memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio | 0.15 | Set the max extended memory of partial aggregation as maxExtendedPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemory | <undefined> | Set the max memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio | 0.1 | Set the max memory of partial aggregation as maxPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. 
Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | -| spark.gluten.sql.columnar.backend.velox.maxPartitionsPerWritersSession | 10000 | Maximum number of partitions per a single table writer instance. | -| spark.gluten.sql.columnar.backend.velox.maxSpillBytes | 100G | The maximum file size of a query | -| spark.gluten.sql.columnar.backend.velox.maxSpillFileSize | 1GB | The maximum size of a single spill file created | -| spark.gluten.sql.columnar.backend.velox.maxSpillLevel | 4 | The max allowed spilling level with zero being the initial spilling level | -| spark.gluten.sql.columnar.backend.velox.maxSpillRunRows | 3M | The maximum row size of a single spill run | -| spark.gluten.sql.columnar.backend.velox.maxTargetFileSize | 0b | The target file size for each output file when writing data. 0 means no limit on target file size, and the actual file size will be determined by other factors such as max partition number and shuffle batch size. | -| spark.gluten.sql.columnar.backend.velox.memCacheSize | 1GB | The memory cache size | -| spark.gluten.sql.columnar.backend.velox.memInitCapacity | 8MB | The initial memory capacity to reserve for a newly created Velox query memory pool. | -| spark.gluten.sql.columnar.backend.velox.memoryPoolCapacityTransferAcrossTasks | true | Whether to allow memory capacity transfer between memory pools from different tasks. | -| spark.gluten.sql.columnar.backend.velox.memoryUseHugePages | false | Use explicit huge pages for Velox memory allocation. | -| spark.gluten.sql.columnar.backend.velox.orc.scan.enabled | true | Enable velox orc scan. If disabled, vanilla spark orc scan will be used. | -| spark.gluten.sql.columnar.backend.velox.orcUseColumnNames | true | Maps table field names to file field names using names, not indices for ORC files. | -| spark.gluten.sql.columnar.backend.velox.parquet.pageSizeBytes | 1MB | The page size in bytes is for compression. 
| -| spark.gluten.sql.columnar.backend.velox.parquetUseColumnNames | true | Maps table field names to file field names using names, not indices for Parquet files. | -| spark.gluten.sql.columnar.backend.velox.prefetchRowGroups | 1 | Set the prefetch row groups for velox file scan | -| spark.gluten.sql.columnar.backend.velox.queryTraceEnabled | false | Enable query tracing flag. | -| spark.gluten.sql.columnar.backend.velox.reclaimMaxWaitMs | 3600000ms | The max time in ms to wait for memory reclaim. | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput | true | If true, combine small columnar batches together before sending to shuffle. The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput.minSize | <undefined> | The minimum batch size for shuffle. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput is set to true. Default value: 0.25 * | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInputOuptut.minSize | <undefined> | The minimum batch size for shuffle input and output. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. The same applies for batches output by shuffle read. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput or spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput is set to true. Default value: 0.25 * | -| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput | false | If true, combine small columnar batches together right after shuffle read. 
The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | -| spark.gluten.sql.columnar.backend.velox.showTaskMetricsWhenFinished | false | Show velox full task metrics when finished. | -| spark.gluten.sql.columnar.backend.velox.spillFileSystem | local | The filesystem used to store spill data. local: The local file system. heap-over-local: Write file to JVM heap if having extra heap space. Otherwise write to local file system. | -| spark.gluten.sql.columnar.backend.velox.spillStrategy | auto | none: Disable spill on Velox backend; auto: Let Spark memory manager manage Velox's spilling | -| spark.gluten.sql.columnar.backend.velox.ssdCacheIOThreads | 1 | The IO threads for cache promoting | -| spark.gluten.sql.columnar.backend.velox.ssdCachePath | /tmp | The folder to store the cache files, better on SSD | -| spark.gluten.sql.columnar.backend.velox.ssdCacheShards | 1 | The cache shards | -| spark.gluten.sql.columnar.backend.velox.ssdCacheSize | 1GB | The SSD cache size, will do memory caching only if this value = 0 | -| spark.gluten.sql.columnar.backend.velox.ssdCheckpointIntervalBytes | 0 | Checkpoint after every 'checkpointIntervalBytes' for SSD cache. 0 means no checkpointing. | -| spark.gluten.sql.columnar.backend.velox.ssdChecksumEnabled | false | If true, checksum write to SSD is enabled. | -| spark.gluten.sql.columnar.backend.velox.ssdChecksumReadVerificationEnabled | false | If true, checksum read verification from SSD is enabled. | -| spark.gluten.sql.columnar.backend.velox.ssdDisableFileCow | false | True if copy on write should be disabled. | -| spark.gluten.sql.columnar.backend.velox.ssdODirect | false | The O_DIRECT flag for cache writing | -| spark.gluten.sql.columnar.backend.velox.valueStream.dynamicFilter.enabled | false | Whether to apply dynamic filters pushed down from hash probe in the ValueStream (shuffle reader) operator to filter rows before they reach the hash join. 
| -| spark.gluten.sql.enable.enhancedFeatures | true | Enable some features including iceberg native write and other features. | -| spark.gluten.sql.rewrite.castArrayToString | true | When true, rewrite `cast(array as String)` to `concat('[', array_join(array, ', ', null), ']')` to allow offloading to Velox. | -| spark.gluten.velox.broadcast.build.targetBytesPerThread | 32MB | It is used to calculate the number of hash table build threads. Based on our testing across various thresholds (1MB to 128MB), we recommend a value of 32MB or 64MB, as these consistently provided the most significant performance gains. | -| spark.gluten.velox.castFromVarcharAddTrimNode | false | If true, will add a trim node which has the same semantic as vanilla Spark to CAST-from-varchar.Otherwise, do nothing. | +| Key | Status | Default | Description | +|----------------------------------------------------------------------------------|------------|-------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| spark.gluten.sql.columnar.backend.velox.IOThreads | ⚓ Static | <undefined> | The Size of the IO thread pool in the Connector. This thread pool is used for split preloading and DirectBufferedInput. By default, the value is the same as the maximum task slots per Spark executor. | +| spark.gluten.sql.columnar.backend.velox.SplitPreloadPerDriver | 🔄 Dynamic | 2 | The split preload per task | +| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinPct | 🔄 Dynamic | 90 | If partial aggregation aggregationPct greater than this value, partial aggregation may be early abandoned. 
Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.abandonPartialAggregationMinRows | 🔄 Dynamic | 100000 | If partial aggregation input rows number greater than this value, partial aggregation may be early abandoned. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.asyncTimeoutOnTaskStopping | ⚓ Static | 30000ms | Timeout in milliseconds when waiting for runtime-scoped async work to finish during teardown. | +| spark.gluten.sql.columnar.backend.velox.cacheEnabled | ⚓ Static | false | Enable Velox cache, default off. It's recommended to enablesoft-affinity as well when enable velox cache. | +| spark.gluten.sql.columnar.backend.velox.cachePrefetchMinPct | ⚓ Static | 0 | Set prefetch cache min pct for velox file scan | +| spark.gluten.sql.columnar.backend.velox.checkUsageLeak | ⚓ Static | true | Enable check memory usage leak. | +| spark.gluten.sql.columnar.backend.velox.cudf.batchSize | 🔄 Dynamic | 2147483647 | Cudf input batch size after shuffle reader | +| spark.gluten.sql.columnar.backend.velox.cudf.enableTableScan | ⚓ Static | false | Enable cudf table scan | +| spark.gluten.sql.columnar.backend.velox.cudf.enableValidation | ⚓ Static | true | Heuristics you can apply to validate a cuDF/GPU plan and only offload when the entire stage can be fully and profitably executed on GPU | +| spark.gluten.sql.columnar.backend.velox.cudf.memoryPercent | ⚓ Static | 50 | The initial percent of GPU memory to allocate for memory resource for one thread. | +| spark.gluten.sql.columnar.backend.velox.cudf.memoryResource | ⚓ Static | async | GPU RMM memory resource. 
| +| spark.gluten.sql.columnar.backend.velox.cudf.shuffleMaxPrefetchBytes | 🔄 Dynamic | 1028MB | Maximum bytes to prefetch in CPU memory during GPU shuffle read while waiting for GPU available. | +| spark.gluten.sql.columnar.backend.velox.directorySizeGuess | ⚓ Static | 32KB | Deprecated, rename to spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | +| spark.gluten.sql.columnar.backend.velox.enableTimestampNtzValidation | 🔄 Dynamic | true | Enable validation fallback for TimestampNTZ type. When true (default), any plan containing TimestampNTZ will fall back to Spark execution. Set to false during development/testing of TimestampNTZ support to allow native execution. | +| spark.gluten.sql.columnar.backend.velox.fileHandleCacheEnabled | ⚓ Static | false | Disables caching if false. File handle cache should be disabled if files are mutable, i.e. file content may change while file path stays the same. | +| spark.gluten.sql.columnar.backend.velox.filePreloadThreshold | ⚓ Static | 1MB | Set the file preload threshold for velox file scan, refer to Velox's file-preload-threshold | +| spark.gluten.sql.columnar.backend.velox.floatingPointMode | 🔄 Dynamic | loose | Config used to control the tolerance of floating point operations alignment with Spark. When the mode is set to strict, flushing is disabled for sum(float/double)and avg(float/double). When set to loose, flushing will be enabled. | +| spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation | 🔄 Dynamic | true | Enable flushable aggregation. If true, Gluten will try converting regular aggregation into Velox's flushable aggregation when applicable. A flushable aggregation could emit intermediate result at anytime when memory is full / data reduction ratio is low. 
| +| spark.gluten.sql.columnar.backend.velox.footerEstimatedSize | ⚓ Static | 32KB | Set the footer estimated size for velox file scan, refer to Velox's footer-estimated-size | +| spark.gluten.sql.columnar.backend.velox.hashProbe.bloomFilterPushdown.maxSize | 🔄 Dynamic | 0b | The maximum byte size of Bloom filter that can be generated from hash probe. When set to 0, no Bloom filter will be generated. To achieve optimal performance, this should not be too larger than the CPU cache size on the host. | +| spark.gluten.sql.columnar.backend.velox.hashProbe.dynamicFilterPushdown.enabled | 🔄 Dynamic | true | Whether hash probe can generate any dynamic filter (including Bloom filter) and push down to upstream operators. | +| spark.gluten.sql.columnar.backend.velox.loadQuantum | ⚓ Static | 256MB | Set the load quantum for velox file scan, recommend to use the default value (256MB) for performance consideration. If Velox cache is enabled, it can be 8MB at most. | +| spark.gluten.sql.columnar.backend.velox.maxCoalescedBytes | ⚓ Static | 64MB | Set the max coalesced bytes for velox file scan | +| spark.gluten.sql.columnar.backend.velox.maxCoalescedDistance | ⚓ Static | 512KB | Set the max coalesced distance bytes for velox file scan | +| spark.gluten.sql.columnar.backend.velox.maxCompiledRegexes | 🔄 Dynamic | 100 | Controls maximum number of compiled regular expression patterns per function instance per thread of execution. | +| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemory | 🔄 Dynamic | <undefined> | Set the max extended memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. 
| +| spark.gluten.sql.columnar.backend.velox.maxExtendedPartialAggregationMemoryRatio | 🔄 Dynamic | 0.15 | Set the max extended memory of partial aggregation as maxExtendedPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemory | 🔄 Dynamic | <undefined> | Set the max memory of partial aggregation in bytes. When this option is set to a value greater than 0, it will override spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemoryRatio | 🔄 Dynamic | 0.1 | Set the max memory of partial aggregation as maxPartialAggregationMemoryRatio of offheap size. Note: this option only works when flushable partial aggregation is enabled. Ignored when spark.gluten.sql.columnar.backend.velox.flushablePartialAggregation=false. | +| spark.gluten.sql.columnar.backend.velox.maxPartitionsPerWritersSession | 🔄 Dynamic | 10000 | Maximum number of partitions per a single table writer instance. 
| +| spark.gluten.sql.columnar.backend.velox.maxSpillBytes | 🔄 Dynamic | 100G | The maximum file size of a query | +| spark.gluten.sql.columnar.backend.velox.maxSpillFileSize | 🔄 Dynamic | 1GB | The maximum size of a single spill file created | +| spark.gluten.sql.columnar.backend.velox.maxSpillLevel | 🔄 Dynamic | 4 | The max allowed spilling level with zero being the initial spilling level | +| spark.gluten.sql.columnar.backend.velox.maxSpillRunRows | 🔄 Dynamic | 3M | The maximum row size of a single spill run | +| spark.gluten.sql.columnar.backend.velox.maxTargetFileSize | 🔄 Dynamic | 0b | The target file size for each output file when writing data. 0 means no limit on target file size, and the actual file size will be determined by other factors such as max partition number and shuffle batch size. | +| spark.gluten.sql.columnar.backend.velox.memCacheSize | ⚓ Static | 1GB | The memory cache size | +| spark.gluten.sql.columnar.backend.velox.memInitCapacity | 🔄 Dynamic | 8MB | The initial memory capacity to reserve for a newly created Velox query memory pool. | +| spark.gluten.sql.columnar.backend.velox.memoryPoolCapacityTransferAcrossTasks | 🔄 Dynamic | true | Whether to allow memory capacity transfer between memory pools from different tasks. | +| spark.gluten.sql.columnar.backend.velox.memoryUseHugePages | 🔄 Dynamic | false | Use explicit huge pages for Velox memory allocation. | +| spark.gluten.sql.columnar.backend.velox.orc.scan.enabled | 🔄 Dynamic | true | Enable velox orc scan. If disabled, vanilla spark orc scan will be used. | +| spark.gluten.sql.columnar.backend.velox.orcUseColumnNames | 🔄 Dynamic | true | Maps table field names to file field names using names, not indices for ORC files. | +| spark.gluten.sql.columnar.backend.velox.parquet.pageSizeBytes | 🔄 Dynamic | 1MB | The page size in bytes is for compression. 
| +| spark.gluten.sql.columnar.backend.velox.parquetUseColumnNames | 🔄 Dynamic | true | Maps table field names to file field names using names, not indices for Parquet files. | +| spark.gluten.sql.columnar.backend.velox.prefetchRowGroups | ⚓ Static | 1 | Set the prefetch row groups for velox file scan | +| spark.gluten.sql.columnar.backend.velox.queryTraceEnabled | 🔄 Dynamic | false | Enable query tracing flag. | +| spark.gluten.sql.columnar.backend.velox.reclaimMaxWaitMs | 🔄 Dynamic | 3600000ms | The max time in ms to wait for memory reclaim. | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput | 🔄 Dynamic | true | If true, combine small columnar batches together before sending to shuffle. The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput.minSize | 🔄 Dynamic | <undefined> | The minimum batch size for shuffle. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput is set to true. Default value: 0.25 * | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInputOuptut.minSize | 🔄 Dynamic | <undefined> | The minimum batch size for shuffle input and output. If size of an input batch is smaller than the value, it will be combined with other batches before sending to shuffle. The same applies for batches output by shuffle read. Only functions when spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput or spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput is set to true. Default value: 0.25 * | +| spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleOutput | 🔄 Dynamic | false | If true, combine small columnar batches together right after shuffle read. 
The default minimum output batch size is equal to 0.25 * spark.gluten.sql.columnar.maxBatchSize | +| spark.gluten.sql.columnar.backend.velox.showTaskMetricsWhenFinished | 🔄 Dynamic | false | Show velox full task metrics when finished. | +| spark.gluten.sql.columnar.backend.velox.spillFileSystem | 🔄 Dynamic | local | The filesystem used to store spill data. local: The local file system. heap-over-local: Write file to JVM heap if having extra heap space. Otherwise write to local file system. | +| spark.gluten.sql.columnar.backend.velox.spillStrategy | 🔄 Dynamic | auto | none: Disable spill on Velox backend; auto: Let Spark memory manager manage Velox's spilling | +| spark.gluten.sql.columnar.backend.velox.ssdCacheIOThreads | ⚓ Static | 1 | The IO threads for cache promoting | +| spark.gluten.sql.columnar.backend.velox.ssdCachePath | ⚓ Static | /tmp | The folder to store the cache files, better on SSD | +| spark.gluten.sql.columnar.backend.velox.ssdCacheShards | ⚓ Static | 1 | The cache shards | +| spark.gluten.sql.columnar.backend.velox.ssdCacheSize | ⚓ Static | 1GB | The SSD cache size, will do memory caching only if this value = 0 | +| spark.gluten.sql.columnar.backend.velox.ssdCheckpointIntervalBytes | ⚓ Static | 0 | Checkpoint after every 'checkpointIntervalBytes' for SSD cache. 0 means no checkpointing. | +| spark.gluten.sql.columnar.backend.velox.ssdChecksumEnabled | ⚓ Static | false | If true, checksum write to SSD is enabled. | +| spark.gluten.sql.columnar.backend.velox.ssdChecksumReadVerificationEnabled | ⚓ Static | false | If true, checksum read verification from SSD is enabled. | +| spark.gluten.sql.columnar.backend.velox.ssdDisableFileCow | ⚓ Static | false | True if copy on write should be disabled. 
| +| spark.gluten.sql.columnar.backend.velox.ssdODirect | ⚓ Static | false | The O_DIRECT flag for cache writing | +| spark.gluten.sql.columnar.backend.velox.valueStream.dynamicFilter.enabled | 🔄 Dynamic | false | Whether to apply dynamic filters pushed down from hash probe in the ValueStream (shuffle reader) operator to filter rows before they reach the hash join. | +| spark.gluten.sql.enable.enhancedFeatures | 🔄 Dynamic | true | Enable some features including iceberg native write and other features. | +| spark.gluten.sql.rewrite.castArrayToString | 🔄 Dynamic | true | When true, rewrite `cast(array as String)` to `concat('[', array_join(array, ', ', null), ']')` to allow offloading to Velox. | +| spark.gluten.velox.broadcast.build.targetBytesPerThread | ⚓ Static | 32MB | It is used to calculate the number of hash table build threads. Based on our testing across various thresholds (1MB to 128MB), we recommend a value of 32MB or 64MB, as these consistently provided the most significant performance gains. | +| spark.gluten.velox.castFromVarcharAddTrimNode | 🔄 Dynamic | false | If true, will add a trim node which has the same semantic as vanilla Spark to CAST-from-varchar.Otherwise, do nothing. | ## Gluten Velox backend *experimental* configurations -| Key | Default | Description | -|----------------------------------------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------| -| spark.gluten.velox.abandonDedupHashMap.minPct | 0 | Experimental: abandon hashmap build if duplicated rows are more than this percentile. Value is integer based and range is [0, 100]. | -| spark.gluten.velox.abandonDedupHashMap.minRows | 100000 | Experimental: abandon hashmap build if duplicated rows more than this number. 
| -| spark.gluten.velox.joinBuildVectorHasherMaxNumDistinct | 1000000 | Experimental: maximum number of distinct values to keep when merging vector hashers in join HashBuild. | -| spark.gluten.velox.minTableRowsForParallelJoinBuild | 1000 | Experimental: the minimum number of table rows that can trigger the parallel hash join table build. | -| spark.gluten.velox.offHeapBroadcastBuildRelation.enabled | false | Experimental: If enabled, broadcast build relation will use offheap memory. Otherwise, broadcast build relation will use onheap memory. | +| Key | Status | Default | Description | +|----------------------------------------------------------|------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------| +| spark.gluten.velox.abandonDedupHashMap.minPct | 🔄 Dynamic | 0 | Experimental: abandon hashmap build if duplicated rows are more than this percentile. Value is integer based and range is [0, 100]. | +| spark.gluten.velox.abandonDedupHashMap.minRows | 🔄 Dynamic | 100000 | Experimental: abandon hashmap build if duplicated rows more than this number. | +| spark.gluten.velox.joinBuildVectorHasherMaxNumDistinct | 🔄 Dynamic | 1000000 | Experimental: maximum number of distinct values to keep when merging vector hashers in join HashBuild. | +| spark.gluten.velox.minTableRowsForParallelJoinBuild | 🔄 Dynamic | 1000 | Experimental: the minimum number of table rows that can trigger the parallel hash join table build. | +| spark.gluten.velox.offHeapBroadcastBuildRelation.enabled | 🔄 Dynamic | false | Experimental: If enabled, broadcast build relation will use offheap memory. Otherwise, broadcast build relation will use onheap memory. 
| diff --git a/gluten-core/src/main/scala/org/apache/gluten/config/ConfigRegistry.scala b/gluten-core/src/main/scala/org/apache/gluten/config/ConfigRegistry.scala index de52f752b81b..4ebc29de9e63 100644 --- a/gluten-core/src/main/scala/org/apache/gluten/config/ConfigRegistry.scala +++ b/gluten-core/src/main/scala/org/apache/gluten/config/ConfigRegistry.scala @@ -29,6 +29,35 @@ trait ConfigRegistry { require(existing.isEmpty, s"Config entry ${entry.key} already registered!") } + private def registerToSQLConf(entry: ConfigEntry[_], isStatic: Boolean): Unit = { + (entry.key :: entry.alternatives).foreach(registerToSQLConf(entry, _, isStatic)) + } + + private def registerToSQLConf(entry: ConfigEntry[_], key: String, isStatic: Boolean): Unit = { + var builder = + if (isStatic) SQLConf.buildStaticConf(key) else SQLConf.buildConf(key) + if (entry.doc.nonEmpty) { + builder = builder.doc(entry.doc) + } + if (entry.version.nonEmpty) { + builder = builder.version(entry.version) + } + if (!entry.isPublic) { + builder = builder.internal() + } + + val sparkEntry = builder.stringConf.transform { + value => + entry.valueConverter(value) + value + } + if (entry.defaultValue.isDefined) { + sparkEntry.createWithDefaultString(entry.defaultValueString) + } else { + sparkEntry.createOptional + } + } + /** Visible for testing. 
*/ private[config] def allEntries: Seq[ConfigEntry[_]] = { configEntries.values.toSeq @@ -38,6 +67,7 @@ trait ConfigRegistry { ConfigBuilder(key).onCreate { entry => register(entry) + registerToSQLConf(entry, isStatic = false) ConfigRegistry.registerToAllEntries(entry) } } @@ -45,8 +75,8 @@ trait ConfigRegistry { protected def buildStaticConf(key: String): ConfigBuilder = { ConfigBuilder(key).onCreate { entry => - SQLConf.registerStaticConfigKey(key) register(entry) + registerToSQLConf(entry, isStatic = true) ConfigRegistry.registerToAllEntries(entry) } } diff --git a/gluten-core/src/main/scala/org/apache/gluten/config/GlutenCoreConfig.scala b/gluten-core/src/main/scala/org/apache/gluten/config/GlutenCoreConfig.scala index 60bec1124ee4..eb08396772f3 100644 --- a/gluten-core/src/main/scala/org/apache/gluten/config/GlutenCoreConfig.scala +++ b/gluten-core/src/main/scala/org/apache/gluten/config/GlutenCoreConfig.scala @@ -18,6 +18,7 @@ package org.apache.gluten.config import org.apache.spark.internal.Logging import org.apache.spark.network.util.ByteUnit +import org.apache.spark.sql.SparkSession import org.apache.spark.sql.internal.{SQLConf, SQLConfProvider} class GlutenCoreConfig(conf: SQLConf) extends Logging { @@ -62,7 +63,14 @@ class GlutenCoreConfig(conf: SQLConf) extends Logging { */ object GlutenCoreConfig extends ConfigRegistry { override def get: GlutenCoreConfig = { - new GlutenCoreConfig(SQLConf.get) + new GlutenCoreConfig(activeSQLConf) + } + + private[gluten] def activeSQLConf: SQLConf = { + SparkSession.getActiveSession + .filterNot(_.sparkContext.isStopped) + .map(_.sessionState.conf) + .getOrElse(SQLConf.get) } val SPARK_OFFHEAP_SIZE_KEY = "spark.memory.offHeap.size" diff --git a/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala b/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala index 2f8155ce70ee..ecdd3aac052c 100644 --- a/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala 
+++ b/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala @@ -458,7 +458,7 @@ object GlutenConfig extends ConfigRegistry { val SPARK_MAX_BROADCAST_TABLE_SIZE = "spark.sql.maxBroadcastTableSize" def get: GlutenConfig = { - new GlutenConfig(SQLConf.get) + new GlutenConfig(GlutenCoreConfig.activeSQLConf) } def prefixOf(backendName: String): String = s"spark.gluten.sql.columnar.backend.$backendName" diff --git a/gluten-substrait/src/test/scala/org/apache/gluten/config/AllGlutenConfiguration.scala b/gluten-substrait/src/test/scala/org/apache/gluten/config/AllGlutenConfiguration.scala index 2e6bd0cfcc82..75b79fbaf580 100644 --- a/gluten-substrait/src/test/scala/org/apache/gluten/config/AllGlutenConfiguration.scala +++ b/gluten-substrait/src/test/scala/org/apache/gluten/config/AllGlutenConfiguration.scala @@ -16,6 +16,8 @@ */ package org.apache.gluten.config +import org.apache.spark.sql.internal.SQLConf + import org.scalactic.Prettifier import org.scalactic.source.Position import org.scalatest.Assertions._ @@ -67,44 +69,53 @@ class AllGlutenConfiguration extends AnyFunSuite { s""" |## Spark base configurations for Gluten plugin | - | Key | Recommend Setting | Description - | --- | --- | --- + | Key | Status | Recommend Setting | Description + | --- | --- | --- | --- |""" // scalastyle:off builder += Seq( "spark.plugins", + AllGlutenConfiguration.staticConfigStatus, "org.apache.gluten.GlutenPlugin", "To load Gluten's components by Spark's plug-in loader.").mkString("|") builder += Seq( "spark.memory.offHeap.enabled", + AllGlutenConfiguration.staticConfigStatus, "true", "Gluten use off-heap memory for certain operations.").mkString("|") builder += Seq( "spark.memory.offHeap.size", + AllGlutenConfiguration.staticConfigStatus, "30G", "The absolute amount of memory which can be used for off-heap allocation, in bytes unless otherwise specified.
Note: Gluten Plugin will leverage this setting to allocate memory space for native usage even offHeap is disabled.
The value is based on your system and it is recommended to set it larger if you are facing Out of Memory issue in Gluten Plugin." ).mkString("|") builder += Seq( "spark.shuffle.manager", + AllGlutenConfiguration.staticConfigStatus, "org.apache.spark.shuffle.sort.ColumnarShuffleManager", - "To turn on Gluten Columnar Shuffle Plugin.").mkString("|") + "To turn on Gluten Columnar Shuffle Plugin." + ).mkString("|") builder += Seq( "spark.driver.extraClassPath", + AllGlutenConfiguration.staticConfigStatus, "/path/to/gluten_jar_file", - "Gluten Plugin jar file to prepend to the classpath of the driver.").mkString("|") + "Gluten Plugin jar file to prepend to the classpath of the driver." + ).mkString("|") builder += Seq( "spark.executor.extraClassPath", + AllGlutenConfiguration.staticConfigStatus, "/path/to/gluten_jar_file", - "Gluten Plugin jar file to prepend to the classpath of executors.").mkString("|") + "Gluten Plugin jar file to prepend to the classpath of executors." + ).mkString("|") // scalastyle:on builder ++= s""" |## Gluten configurations | - | Key | Default | Description - | --- | --- | --- + | Key | Status | Default | Description + | --- | --- | --- | --- |""" val allEntries = GlutenConfig.allEntries ++ GlutenCoreConfig.allEntries @@ -116,7 +127,11 @@ class AllGlutenConfiguration extends AnyFunSuite { .foreach { entry => val dft = entry.defaultValueString.replace("<", "<").replace(">", ">") - builder += Seq(s"${entry.key}", s"$dft", s"${entry.doc}") + builder += Seq( + s"${entry.key}", + AllGlutenConfiguration.configStatus(entry), + s"$dft", + s"${entry.doc}") .mkString("|") } @@ -124,8 +139,8 @@ class AllGlutenConfiguration extends AnyFunSuite { s""" |## Gluten *experimental* configurations | - | Key | Default | Description - | --- | --- | --- + | Key | Status | Default | Description + | --- | --- | --- | --- |""" allEntries @@ -135,7 +150,11 @@ class AllGlutenConfiguration extends AnyFunSuite { .foreach { entry => val dft = 
entry.defaultValueString.replace("<", "<").replace(">", ">") - builder += Seq(s"${entry.key}", s"$dft", s"${entry.doc}") + builder += Seq( + s"${entry.key}", + AllGlutenConfiguration.configStatus(entry), + s"$dft", + s"${entry.doc}") .mkString("|") } @@ -147,6 +166,16 @@ class AllGlutenConfiguration extends AnyFunSuite { } object AllGlutenConfiguration { + private val Anchor = Character.toString(0x2693) + private val CounterclockwiseArrows = new String(Character.toChars(0x1f504)) + + val staticConfigStatus: String = s"$Anchor Static" + val dynamicConfigStatus: String = s"$CounterclockwiseArrows Dynamic" + + def configStatus(entry: ConfigEntry[_]): String = { + if (SQLConf.isStaticConfigKey(entry.key)) staticConfigStatus else dynamicConfigStatus + } + def isRegenerateGoldenFiles: Boolean = sys.env.get("GLUTEN_UPDATE").contains("1") def getCodeSourceLocation[T](clazz: Class[T]): String = { diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala index cff47c77e372..e24e88e55404 100644 --- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala +++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala @@ -16,4 +16,26 @@ */ package org.apache.spark.sql -class GlutenRuntimeConfigSuite extends RuntimeConfigSuite with GlutenTestsTrait {} +import org.apache.gluten.config.GlutenConfig + +class GlutenRuntimeConfigSuite extends RuntimeConfigSuite with GlutenTestsTrait { + test("Gluten configs report correct runtime modifiability") { + val conf = SparkSession.active.conf + assert(conf.isModifiable(GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key)) + assert(!conf.isModifiable(GlutenConfig.GLUTEN_UI_ENABLED.key)) + } + + test("GlutenConfig reads active SparkSession runtime configs") { + val conf = SparkSession.active.conf + val key = GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key + val original = 
conf.get(key) + try { + conf.set(key, false) + assert(!GlutenConfig.get.enableColumnarFileScan) + conf.set(key, true) + assert(GlutenConfig.get.enableColumnarFileScan) + } finally { + conf.set(key, original) + } + } +} diff --git a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala index b0e3b94a162e..4d12ed8f7753 100644 --- a/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala +++ b/gluten-ut/spark40/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala @@ -18,4 +18,37 @@ package org.apache.spark.sql.execution import org.apache.spark.sql.GlutenSQLTestsBaseTrait -class GlutenSparkSqlParserSuite extends SparkSqlParserSuite with GlutenSQLTestsBaseTrait {} +import org.scalactic.source.Position +import org.scalatest.Tag + +class GlutenSparkSqlParserSuite extends SparkSqlParserSuite with GlutenSQLTestsBaseTrait { + private var registerQuotedConfigParserTest = false + + override protected def test(testName: String, testTags: Tag*)(testFun: => Any)(implicit + pos: Position): Unit = { + if (isConfigParserCoverage(testName) && !registerQuotedConfigParserTest) { + () + } else { + super.test(testName, testTags: _*)(testFun)(pos) + } + } + + registerQuotedConfigParserTest = true + test("Checks if SET/RESET can parse all the configurations") { + sqlConf.getAllDefinedConfs.map(_._1).foreach { + key: String => + val quotedKey = quoteConfigKey(key) + spark.sessionState.sqlParser.parsePlan(s"SET $quotedKey") + spark.sessionState.sqlParser.parsePlan(s"RESET $quotedKey") + } + } + registerQuotedConfigParserTest = false + + private def isConfigParserCoverage(testName: String): Boolean = { + testName == "Checks if SET/RESET can parse all the configurations" + } + + private def quoteConfigKey(key: String): String = { + s"`${key.replace("`", "``")}`" + } +} diff --git 
a/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala b/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala index 414c10ff8a39..78e702108465 100644 --- a/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala +++ b/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/GlutenRuntimeConfigSuite.scala @@ -16,6 +16,28 @@ */ package org.apache.spark.sql +import org.apache.gluten.config.GlutenConfig + import org.apache.spark.sql.shim.GlutenTestsTrait -class GlutenRuntimeConfigSuite extends RuntimeConfigSuite with GlutenTestsTrait {} +class GlutenRuntimeConfigSuite extends RuntimeConfigSuite with GlutenTestsTrait { + test("Gluten configs report correct runtime modifiability") { + val conf = SparkSession.active.conf + assert(conf.isModifiable(GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key)) + assert(!conf.isModifiable(GlutenConfig.GLUTEN_UI_ENABLED.key)) + } + + test("GlutenConfig reads active SparkSession runtime configs") { + val conf = SparkSession.active.conf + val key = GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key + val original = conf.get(key) + try { + conf.set(key, false) + assert(!GlutenConfig.get.enableColumnarFileScan) + conf.set(key, true) + assert(GlutenConfig.get.enableColumnarFileScan) + } finally { + conf.set(key, original) + } + } +} diff --git a/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala b/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala index b0e3b94a162e..4d12ed8f7753 100644 --- a/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala +++ b/gluten-ut/spark41/src/test/scala/org/apache/spark/sql/execution/GlutenSparkSqlParserSuite.scala @@ -18,4 +18,37 @@ package org.apache.spark.sql.execution import org.apache.spark.sql.GlutenSQLTestsBaseTrait -class GlutenSparkSqlParserSuite extends SparkSqlParserSuite with 
GlutenSQLTestsBaseTrait {} +import org.scalactic.source.Position +import org.scalatest.Tag + +class GlutenSparkSqlParserSuite extends SparkSqlParserSuite with GlutenSQLTestsBaseTrait { + private var registerQuotedConfigParserTest = false + + override protected def test(testName: String, testTags: Tag*)(testFun: => Any)(implicit + pos: Position): Unit = { + if (isConfigParserCoverage(testName) && !registerQuotedConfigParserTest) { + () + } else { + super.test(testName, testTags: _*)(testFun)(pos) + } + } + + registerQuotedConfigParserTest = true + test("Checks if SET/RESET can parse all the configurations") { + sqlConf.getAllDefinedConfs.map(_._1).foreach { + key: String => + val quotedKey = quoteConfigKey(key) + spark.sessionState.sqlParser.parsePlan(s"SET $quotedKey") + spark.sessionState.sqlParser.parsePlan(s"RESET $quotedKey") + } + } + registerQuotedConfigParserTest = false + + private def isConfigParserCoverage(testName: String): Boolean = { + testName == "Checks if SET/RESET can parse all the configurations" + } + + private def quoteConfigKey(key: String): String = { + s"`${key.replace("`", "``")}`" + } +}