package io.cdap.cdap.app.runtime.spark.submit;

import com.google.common.base.Function;
import com.google.common.collect.ImmutableList;
import io.cdap.cdap.app.runtime.spark.SparkRuntimeContext;
import io.cdap.cdap.internal.app.runtime.distributed.LocalizeResource;
import io.cdap.cdap.runtime.spi.runtimejob.LaunchMode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.twill.filesystem.LocationFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import javax.annotation.Nullable;

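/**
 * A {@link DistributedSparkSubmitter} for submitting Spark programs to Dataproc Serverless.
 * It forces the {@code --master} argument to a {@code local[n]} master and clears submit
 * configurations that currently break serverless submission (see the TODOs in
 * {@link #generateSubmitConf(Map)}).
 */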
public class ServerlessDataprocSubmitter extends DistributedSparkSubmitter {

  private static final Logger LOG = LoggerFactory.getLogger(ServerlessDataprocSubmitter.class);

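  // Maps a localized resource to its URI path, dropping the "#name" fragment used for renaming.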
  private static final Function<LocalizeResource, String> RESOURCE_TO_PATH = input ->
      input.getURI().toString().split("#")[0];
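  // Matches local master specs such as "local[2]" or "local[*]".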
  private static final Pattern LOCAL_MASTER_PATTERN = Pattern.compile("local\\[([0-9]+|\\*)\\]");

  public ServerlessDataprocSubmitter(Configuration hConf, LocationFactory locationFactory,
                                     String hostname, SparkRuntimeContext runtimeContext,
                                     @Nullable String schedulerQueueName, LaunchMode launchMode) {
    super(hConf, locationFactory, hostname, runtimeContext, schedulerQueueName, launchMode);
  }

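  /**
   * Overrides the {@code --master} argument so the program runs with a local Spark master,
   * honoring a {@code local[n]} setting from the program configuration when present.
   */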
  @Override
  protected void addMaster(Map<String, String> configs, ImmutableList.Builder<String> argBuilder) {
    // Use at least two threads so Spark Streaming programs can make progress.
    String masterArg = "local[2]";

    String master = configs.get("spark.master");
    if (master != null) {
      Matcher matcher = LOCAL_MASTER_PATTERN.matcher(master);
      if (matcher.matches()) {
        masterArg = "local[" + matcher.group(1) + "]";
      }
    }
    argBuilder.add("--master").add(masterArg);
  }

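  /**
   * Generates the Spark submit configurations for serverless submission, clearing settings
   * that currently fail on Dataproc Serverless (see the TODOs below).
   */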
  @Override
  protected Map<String, String> generateSubmitConf(Map<String, String> appConf) {
    Map<String, String> config = new HashMap<>();
    config.put("spark.executorEnv.CDAP_LOG_DIR", ApplicationConstants.LOG_DIR_EXPANSION_VAR);
    // TODO: Cleared to work around a distributed Spark error:
    // "$destFile exists and does not match contents"
    config.put("spark.files", "");
    config.put("spark.jars", "");
    config.put("spark.repl.local.jars", "");
    // TODO: Cleared because DataprocMetricsListener is not a subclass of
    // org.apache.spark.scheduler.SparkListenerInterface
    config.put("spark.dataproc.listeners", "");

    // Make the Spark UI run on a random port. By default, the Spark UI runs on port 4040 and searches
    // sequentially for the next free port if 4040 is already occupied. During that search it logs a big
    // stacktrace at WARN level, which pollutes the logs when concurrent Spark jobs are running
    // (e.g. a fork in a Workflow).
    config.put("spark.ui.port", "0");

    return config;
  }
}