Spark Source Code Walkthrough (Part 1)

1. How Spark launches an application through YARN

When a Spark application is submitted through YARN, the main method of SparkSubmit is executed first.

override def main(args: Array[String]): Unit = {
    val submit = new SparkSubmit() {
      self =>
      override protected def parseArguments(args: Array[String]): SparkSubmitArguments = 
      {
        new SparkSubmitArguments(args) {
         ...
        }
      }
      // doSubmit is then invoked here
      override def doSubmit(args: Array[String]): Unit = {
        try {
          super.doSubmit(args)
        } catch {
          ...  // error handling elided
        }
      }
    }
    submit.doSubmit(args)  // submit the application
  }
--------------------------------------------------
def doSubmit(args: Array[String]): Unit = {
    ...  
    val appArgs = parseArguments(args)  // parse the arguments; appArgs.action is then matched to decide the behavior
    if (appArgs.verbose) {
      logInfo(appArgs.toString)
    }
    appArgs.action match {
      case SparkSubmitAction.SUBMIT => submit(appArgs, uninitLog)
      case SparkSubmitAction.KILL => kill(appArgs)
      case SparkSubmitAction.REQUEST_STATUS => requestStatus(appArgs)
      case SparkSubmitAction.PRINT_VERSION => printVersion()
    }
  }
--------------------------------------------------
protected def parseArguments(args: Array[String]): SparkSubmitArguments = {
    new SparkSubmitArguments(args) // creates a new SparkSubmitArguments instance
}
--------------------------------------------------
parse(args.asJava)        // parse the command-line arguments during initialization (called from the SparkSubmitArguments constructor)

loadEnvironmentArguments()  // fill in defaults from the environment and config, including the default action
--------------------------------------------------
Parsing the command-line arguments
Pattern eqSeparatedOpt = Pattern.compile("(--[^=]+)=(.+)");
...
  if (m.matches()) {  // split each argument into an option name and a value
    arg = m.group(1);
    value = m.group(2);
  }
...
handle(name, value)  // dispatch to handle(), which matches on the option name
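
To make the regex concrete, here is a tiny standalone sketch (not Spark code; EqOptDemo is a made-up name) of how the eqSeparatedOpt pattern splits a --name=value argument:

import java.util.regex.Pattern

object EqOptDemo {
  private val eqSeparatedOpt = Pattern.compile("(--[^=]+)=(.+)")

  def split(arg: String): Option[(String, String)] = {
    val m = eqSeparatedOpt.matcher(arg)
    if (m.matches()) Some((m.group(1), m.group(2))) else None
  }

  def main(args: Array[String]): Unit = {
    println(split("--deploy-mode=cluster"))   // Some((--deploy-mode,cluster))
    println(split("--class"))                 // None: the value arrives as the next token instead
  }
}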
--------------------------------------------------
The default action
action = Option(action).getOrElse(SUBMIT)  // if no action was specified, default to SUBMIT

All of the spark-submit options are matched in the handle method (the option-name constants are defined in the Java class SparkSubmitOptionParser, and handle is overridden in SparkSubmitArguments):

class SparkSubmitOptionParser {
  protected final String CLASS = "--class";
  protected final String CONF = "--conf";
  protected final String DEPLOY_MODE = "--deploy-mode";
  ......
}

// overridden in SparkSubmitArguments (Scala):
override protected def handle(opt: String, value: String): Boolean = {
    opt match {
      case NAME => name = value
      case MASTER => master = value
      case CLASS => mainClass = value
      ...
    }
}
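
Why does case CLASS match the literal string "--class"? Because a stable identifier that starts with an uppercase letter is compared by equality in a Scala pattern, not treated as a binding variable. A minimal standalone sketch of the same dispatch (made-up object and fields, not Spark code):

object MiniOptionParser {
  // uppercase vals act as stable identifiers in patterns, so `case CLASS =>` compares by equality
  final val CLASS = "--class"
  final val DEPLOY_MODE = "--deploy-mode"

  var mainClass: String = _
  var deployMode: String = _

  def handle(opt: String, value: String): Boolean = opt match {
    case CLASS       => mainClass = value; true
    case DEPLOY_MODE => deployMode = value; true
    case _           => false   // unknown option
  }

  def main(args: Array[String]): Unit = {
    handle("--class", "org.example.Main")
    handle("--deploy-mode", "cluster")
    println(s"mainClass=$mainClass, deployMode=$deployMode")
  }
}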

Since the default action is SUBMIT, we step into submit:

private def submit(args: SparkSubmitArguments, uninitLog: Boolean): Unit = {
    def doRunMain(): Unit = {
      // check whether a proxy user was specified; it is not here, so we take the else branch
      if (args.proxyUser != null) {
       ...
      } else {
        runMain(args, uninitLog)
      }
    }

    // this check only matters for standalone-cluster mode using the REST gateway; in YARN mode we take the else branch
    if (args.isStandaloneCluster && args.useRest) {
      ...
    } else {
      doRunMain()
    }
  }
-----------------------------------------------------
private def runMain(args: SparkSubmitArguments, uninitLog: Boolean): Unit = {
    // prepare the submit environment: child args, classpath, conf, and the main class to run
    val (childArgs, childClasspath, sparkConf, childMainClass) = prepareSubmitEnvironment(args)

    ...
    val loader = getSubmitClassLoader(sparkConf)  // the class loader used to load the main class
    var mainClass: Class[_] = null
    try {
      mainClass = Utils.classForName(childMainClass)   // load the class by its name
    }

    ...
    // if mainClass implements SparkApplication, instantiate it via its no-arg constructor; otherwise wrap its static main in a JavaMainApplication
    val app: SparkApplication = if (classOf[SparkApplication].isAssignableFrom(mainClass)) {
      mainClass.getConstructor().newInstance().asInstanceOf[SparkApplication]
    } else {
      new JavaMainApplication(mainClass)
    }
    app.start(childArgs.toArray, sparkConf)   // start the application
}
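
The isAssignableFrom check above is ordinary JVM reflection. A self-contained sketch of the same dispatch pattern (MiniApplication and HelloApp are made-up names, not Spark code):

trait MiniApplication {
  def start(args: Array[String]): Unit
}

class HelloApp extends MiniApplication {
  override def start(args: Array[String]): Unit = println("started with: " + args.mkString(" "))
}

object ReflectiveLaunch {
  def launch(className: String, args: Array[String]): Unit = {
    val clazz = Class.forName(className)
    val app: MiniApplication =
      if (classOf[MiniApplication].isAssignableFrom(clazz)) {
        // the class implements the application trait: instantiate it reflectively
        clazz.getConstructor().newInstance().asInstanceOf[MiniApplication]
      } else {
        // otherwise fall back to invoking its static main(Array[String]) method
        new MiniApplication {
          override def start(args: Array[String]): Unit =
            clazz.getMethod("main", classOf[Array[String]]).invoke(null, args)
        }
      }
    app.start(args)
  }

  def main(args: Array[String]): Unit =
    launch("HelloApp", Array("--demo"))
}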

So mainClass is the crucial piece: it is whatever childMainClass names, and childMainClass is one of the values returned by prepareSubmitEnvironment.

// in yarn-cluster mode childMainClass is therefore YarnClusterApplication
private[deploy] val YARN_CLUSTER_SUBMIT_CLASS =
    "org.apache.spark.deploy.yarn.YarnClusterApplication"

if (isYarnCluster) {
  childMainClass = YARN_CLUSTER_SUBMIT_CLASS   // assigned inside prepareSubmitEnvironment
}

The start method invoked here lives in the YARN module:

private[spark] class YarnClusterApplication extends SparkApplication {
  override def start(args: Array[String], conf: SparkConf): Unit = {
    conf.remove(JARS)
    conf.remove(FILES)
    conf.remove(ARCHIVES)
    // create a YARN Client and immediately run it
    new Client(new ClientArguments(args), conf, null).run()
  }
}
---------------------------------------------------
Initializing ClientArguments
private[spark] class ClientArguments(args: Array[String]) {
  var userJar: String = null
  var userClass: String = null
  var primaryPyFile: String = null
  var primaryRFile: String = null
  var userArgs: ArrayBuffer[String] = new ArrayBuffer[String]()

  parseArgs(args.toList)  // parse the arguments
  private def parseArgs(inputArgs: List[String]): Unit = {
    var args = inputArgs

    while (!args.isEmpty) {
      args match {
        case ("--jar") :: value :: tail =>
          userJar = value
          args = tail
        case ("--class") :: value :: tail =>
          userClass = value
          args = tail
        ...
      }
    }
  }
}
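
The while-plus-pattern-match style above, consuming the argument list two tokens at a time, is a common Scala idiom; a minimal self-contained sketch (made-up, not Spark code):

object MiniArgParser {
  def parse(inputArgs: List[String]): Map[String, String] = {
    var args = inputArgs
    var parsed = Map.empty[String, String]
    while (args.nonEmpty) {
      args match {
        case "--jar" :: value :: tail =>
          parsed += ("jar" -> value); args = tail
        case "--class" :: value :: tail =>
          parsed += ("class" -> value); args = tail
        case _ :: tail =>
          args = tail        // skip anything unrecognized
        case Nil =>          // unreachable: the loop condition guards against this
      }
    }
    parsed
  }

  def main(args: Array[String]): Unit =
    println(parse(List("--jar", "app.jar", "--class", "org.example.Main")))
}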
----------------------------------------------------
Initializing the Client
private[spark] class Client(...) extends Logging {
  // create the YARN client
  private val yarnClient = YarnClient.createYarnClient
}
----------------------------------------------------
Finally, the run method is executed:
def run(): Unit = {
    // submit the application and keep its id
    this.appId = submitApplication()
    if (!launcherBackend.isConnected() && fireAndForget) {
      val report = getApplicationReport(appId)
      val state = report.getYarnApplicationState
    } else {
      val YarnAppReport(appState, finalState, diags) = monitorApplication(appId)
    }
  }
---------------------------------------------------
def submitApplication(): ApplicationId = {
  launcherBackend.connect()
  yarnClient.init(hadoopConf)
  yarnClient.start()  // start the YARN client

  val newApp = yarnClient.createApplication()  // ask the RM to create an application and get a globally unique id
  val newAppResponse = newApp.getNewApplicationResponse()
  appId = newAppResponse.getApplicationId()
  ...
  val containerContext = createContainerLaunchContext(newAppResponse)  // build the AM container launch context
  val appContext = createApplicationSubmissionContext(newApp, containerContext) // build the application submission context

  yarnClient.submitApplication(appContext)  // submit to the ResourceManager
  launcherBackend.setAppId(appId.toString)
  reportLauncherState(SparkAppHandle.State.SUBMITTED)
}
---------------------------------------------------
Building the container launch context
private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
    : ContainerLaunchContext = {
    val amClass =
      if (isClusterMode) {  // cluster mode: run the driver inside the AM
        Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
      } else {              // client mode: the AM only launches executors
        Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
      }
    val amArgs =
      Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
      Seq("--properties-file",
        buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE)) ++
      Seq("--dist-cache-conf",
        buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, DIST_CACHE_CONF_FILE))

    // assemble the launch command
    val commands = prefixEnv ++
      Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
      javaOpts ++ amArgs ++
      Seq(
        "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
        "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
}

YARN then launches org.apache.spark.deploy.yarn.ApplicationMaster as a new JVM process, using the /bin/java command assembled above.
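
For intuition, the resulting command stored in the ContainerLaunchContext looks roughly like the following (an illustration only; memory settings, the properties-file name and the log paths depend on the deployment):

{{JAVA_HOME}}/bin/java -server -Xmx1024m \
    org.apache.spark.deploy.yarn.ApplicationMaster \
    --class org.example.MyApp \
    --jar app.jar \
    --properties-file {{PWD}}/__spark_conf__/__spark_conf__.properties \
    1> <LOG_DIR>/stdout 2> <LOG_DIR>/stderr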

Execution starts in the main method of ApplicationMaster:
def main(args: Array[String]): Unit = {
    SignalUtils.registerLogger(log)
    val amArgs = new ApplicationMasterArguments(args)  // parse the arguments, much like before
    val sparkConf = new SparkConf()
    ....
    // create the YARN configuration
    val yarnConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))
    // 1. create the ApplicationMaster
    master = new ApplicationMaster(amArgs, sparkConf, yarnConf)
    ...
    // 2. run the master
    ugi.doAs(new PrivilegedExceptionAction[Unit]() {
      override def run(): Unit = System.exit(master.run())
    })
}
----------------------------------------------------------------
1. Creating the ApplicationMaster

private val client = new YarnRMClient() // create the YarnRMClient, which manages the connection between the ApplicationMaster (AM) and the ResourceManager (RM)
----------------------------------------------------------------
2. The master's run method dispatches on the deploy mode:

final def run(): Int = {
    ...
    if (isClusterMode) {
      runDriver()   // cluster mode: run the driver inside the ApplicationMaster
    } else {
      runExecutorLauncher()  // client mode: only launch executors, the driver runs in the submitting process
    }
}

The cluster-mode (runDriver) source:

private def runDriver(): Unit = {
    // 1. start the user application; this is where the SparkContext gets created
    userClassThread = startUserApplication()

    try {
      // block the main AM thread here until the SparkContext has been created
      val sc = ThreadUtils.awaitResult(sparkContextPromise.future,
        Duration(totalWaitTime, TimeUnit.MILLISECONDS))
      if (sc != null) {
        val rpcEnv = sc.env.rpcEnv  // grab the RPC environment
        // register the AM with YARN so that it can request resources
        registerAM(host, port, userConf, sc.ui.map(_.webUrl), appAttemptId)

        val driverRef = rpcEnv.setupEndpointRef(
          RpcAddress(host, port),
          YarnSchedulerBackend.ENDPOINT_NAME)
        // 2. create the resource allocator
        createAllocator(driverRef, userConf, rpcEnv, appAttemptId, distCacheConf)
      }
      resumeDriver()  // once registration and allocation are done, let the user program continue (see the sketch after this block and the resumeDriver discussion at the end)
      userClassThread.join()
    }
  }
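
The interplay between awaitResult and resumeDriver is easiest to see in isolation. A simplified sketch of the AM/driver-thread handshake (not Spark code; the Promise and the latch merely stand in for sparkContextPromise and the resumeDriver signal):

import java.util.concurrent.CountDownLatch
import scala.concurrent.{Await, Promise}
import scala.concurrent.duration._

object DriverHandshake {
  private val contextReady = Promise[String]()    // stands in for sparkContextPromise
  private val setupDone = new CountDownLatch(1)   // stands in for the resumeDriver() signal

  def main(args: Array[String]): Unit = {
    val userThread = new Thread(new Runnable {
      override def run(): Unit = {
        contextReady.success("SparkContext")      // "the SparkContext has been created"
        setupDone.await()                         // parked here until the AM resumes us
        println("user code continues; jobs can be submitted now")
      }
    }, "Driver")
    userThread.start()

    val sc = Await.result(contextReady.future, 10.seconds)   // the AM blocks here
    println(s"AM sees the $sc, registers with YARN and creates the allocator ...")
    setupDone.countDown()                         // resumeDriver()
    userThread.join()
  }
}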
---------------------------------------------------------------
1. Starting the user application (this is where the SparkContext is created)

private def startUserApplication(): Thread = {
  ...
  // use the user class loader to load the main method of the class passed via --class
  val mainMethod = userClassLoader.loadClass(args.userClass)
    .getMethod("main", classOf[Array[String]])
  // run the user's main method in a new "Driver" thread
  val userThread = new Thread {
    override def run(): Unit = {
      try {
        // if the user's main method is not static, fail; otherwise invoke it
        if (!Modifier.isStatic(mainMethod.getModifiers)) {
          finish(FinalApplicationStatus.FAILED, ApplicationMaster.EXIT_EXCEPTION_USER_CLASS)
        } else {
          // the SparkContext is initialized inside the user's main method
          mainMethod.invoke(null, userArgs.toArray)
          finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
        }
      }
    }
  }
  userThread.setName("Driver")
  userThread.start()  // start the driver thread
  userThread
}
----------------------------------------------------------------
2. Creating the resource allocator

private def createAllocator(...): Unit = {
    ...
    allocator = client.createAllocator(yarnConf, _sparkConf, appAttemptId, driverUrl,
      driverRef, securityMgr, localResources)

    allocator.allocateResources()  // 2.1 request resources and process what is available
}
----------------------------------------------------------------
2.1 Processing the allocated resources

def allocateResources(): Unit = synchronized {
  val allocatedContainers = allocateResponse.getAllocatedContainers() // containers the RM has allocated to us
  if (allocatedContainers.size > 0) {
    handleAllocatedContainers(allocatedContainers.asScala.toSeq)  // 2.1.1 there are usable resources, so handle them
  }
}
----------------------------------------------------------------
2.1.1 Handling the allocated containers

def handleAllocatedContainers(allocatedContainers: Seq[Container]): Unit = {
  ... // first, match containers to outstanding requests (by host, rack, and so on)
  runAllocatedContainers(containersToUse)  // 2.1.1.1 launch executors in the usable containers
}
----------------------------------------------------------------
2.1.1.1 Launching executors in the usable containers

private def runAllocatedContainers(containersToUse: ArrayBuffer[Container]): Unit = synchronized {
    // loop over the usable containers
    for (container <- containersToUse) {
      val containerCores = rp.getExecutorCores.getOrElse(defaultResources.cores)
      val rpRunningExecs = getOrUpdateRunningExecutorForRPId(rpId).size
      // if fewer executors are running than the target, launch one in this container
      if (rpRunningExecs < getOrUpdateTargetNumExecutorsForRPId(rpId)) {
        getOrUpdateNumExecutorsStartingForRPId(rpId).incrementAndGet()
        if (launchContainers) {
          launcherPool.execute(() => {
            try {
              new ExecutorRunnable().run() // 2.1.1.1.1 launch the executor on the launcher thread pool
              updateInternalState()
            }
          })
        }
      }
    }
  }
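
A stripped-down sketch of the guard-then-launch pattern above (not Spark code): new "executors" are only started while the running count is below the target, and the actual launch happens on a thread pool:

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicInteger

object MiniAllocator {
  private val running = new AtomicInteger(0)
  private val target = 3
  private val launcherPool = Executors.newCachedThreadPool()

  def handleAllocated(containers: Seq[String]): Unit = {
    for (container <- containers if running.get() < target) {
      running.incrementAndGet()
      launcherPool.execute(new Runnable {
        override def run(): Unit = println(s"launching executor in $container")
      })
    }
  }

  def main(args: Array[String]): Unit = {
    handleAllocated((1 to 5).map(i => s"container_$i"))   // only 3 of the 5 containers are used
    launcherPool.shutdown()
  }
}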
----------------------------------------------------------------
2.1.1.1.1 Launching via the thread pool

def run(): Unit = {
  nmClient = NMClient.createNMClient()
  nmClient.init(conf)
  nmClient.start()
  startContainer()  // start the container on the NodeManager
}
----------------------------------------------------------------
2.1.1.1.1.1 Starting the container through the NodeManager

def startContainer(): java.util.Map[String, ByteBuffer] = {
  val commands = prepareCommand()  // 1. prepare the launch command
  ...
  nmClient.startContainer(container.get, ctx) // ask the NodeManager to start the container
}
----------------------------------------------------------------
2.1.1.1.1.1.1 Preparing the launch command
private def prepareCommand(): List[String] = {
  // once again a /bin/java command, this time launching YarnCoarseGrainedExecutorBackend, whose job is to start an executor
  val commands = prefixEnv ++
      Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
      javaOpts ++
      Seq("org.apache.spark.executor.YarnCoarseGrainedExecutorBackend",
        "--driver-url", masterAddress,
        "--executor-id", executorId,
        "--hostname", hostname,
        "--cores", executorCores.toString,
        "--app-id", appId,
        "--resourceProfileId", resourceProfileId.toString) ++
      userClassPath ++
      Seq(
        s"1>${ApplicationConstants.LOG_DIR_EXPANSION_VAR}/stdout",
        s"2>${ApplicationConstants.LOG_DIR_EXPANSION_VAR}/stderr")
}
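
For comparison, the executor launch command ends up looking roughly like this (again only an illustration; the driver URL, ids, memory settings and class path vary per deployment):

{{JAVA_HOME}}/bin/java -server -Xmx2048m \
    org.apache.spark.executor.YarnCoarseGrainedExecutorBackend \
    --driver-url spark://CoarseGrainedScheduler@<driver-host>:<port> \
    --executor-id 1 --hostname <nm-host> --cores 2 \
    --app-id application_1700000000000_0001 --resourceProfileId 0 \
    1> <LOG_DIR>/stdout 2> <LOG_DIR>/stderr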

Next, let's see what YarnCoarseGrainedExecutorBackend does:

private[spark] object YarnCoarseGrainedExecutorBackend extends Logging {
  // the main method; a YarnCoarseGrainedExecutorBackend instance is the RPC endpoint that runs inside each executor container
  def main(args: Array[String]): Unit = {
    val createFn: (RpcEnv, CoarseGrainedExecutorBackend.Arguments, SparkEnv, ResourceProfile) =>
      CoarseGrainedExecutorBackend = { case (rpcEnv, arguments, env, resourceProfile) =>
      new YarnCoarseGrainedExecutorBackend(rpcEnv, arguments.driverUrl, arguments.executorId,
        arguments.bindAddress, arguments.hostname, arguments.cores, arguments.userClassPath.toSeq,
        env, arguments.resourcesFileOpt, resourceProfile)
    }
    val backendArgs = CoarseGrainedExecutorBackend.parseArguments(args,
      this.getClass.getCanonicalName.stripSuffix("$"))
    CoarseGrainedExecutorBackend.run(backendArgs, createFn)  // 1. run the coarse-grained executor backend
    System.exit(0)
  }
}
-----------------------------------------------------------
1. Running the coarse-grained executor backend

private[spark] class CoarseGrainedExecutorBackend(...)
  extends IsolatedRpcEndpoint with ExecutorBackend with Logging {

  // called when the inbox delivers the OnStart message to this endpoint
  override def onStart(): Unit = {
    if (env.conf.get(DECOMMISSION_ENABLED)) {
      SignalUtils.register("PWR", "Failed to register SIGPWR handler - " +
        "disabling executor decommission feature.") (self.askSync[Boolean](ExecutorSigPWRReceived))
    }

    try {
      _resources = parseOrFindResources(resourcesFileOpt)
    }
    rpcEnv.asyncSetupEndpointRefByURI(driverUrl).flatMap { ref =>
      driver = Some(ref)
      // ask the driver to register this executor; the DriverEndpoint inside the SparkContext handles the message in receiveAndReply
      ref.ask[Boolean](RegisterExecutor(executorId, self, hostname, cores, extractLogUrls,
        extractAttributes, _resources, resourceProfile.id))
    }(ThreadUtils.sameThread).onComplete {
      case Success(_) =>   // when the driver replies true, send ourselves a RegisteredExecutor message
        self.send(RegisteredExecutor)
      case Failure(e) =>
        exitExecutor(1, s"Cannot register with driver: $driverUrl", e, notifyDriver = false)
    }(ThreadUtils.sameThread)
  }

  def run(...): Unit = {   // (defined on the companion object CoarseGrainedExecutorBackend)
    ...
    // create the executor's runtime environment
    val env = SparkEnv.createExecutorEnv(driverConf, arguments.executorId,
      arguments.bindAddress, arguments.hostname, arguments.cores,
      cfg.ioEncryptionKey, isLocal = false)

    // 1.1 register the endpoint; YarnCoarseGrainedExecutorBackend is that endpoint
    env.rpcEnv.setupEndpoint("Executor",
         backendCreateFn(env.rpcEnv, arguments, env, cfg.resourceProfile))
  }

  // receive messages
  override def receive: PartialFunction[Any, Unit] = {
    case RegisteredExecutor =>
      // this is where the Executor object is actually created
      try {
        executor = new Executor(executorId, hostname, env, userClassPath, isLocal = false,
          resources = _resources)
        driver.get.send(LaunchedExecutor(executorId))
      }
}
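
The registration handshake, ask the driver and only on a true reply send RegisteredExecutor to ourselves, can be sketched with plain Futures (not Spark code; askDriver and receive are made-up stand-ins for the RPC calls):

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object RegistrationHandshake {
  implicit val ec: ExecutionContext = ExecutionContext.global

  sealed trait Message
  case object RegisteredExecutor extends Message

  // stands in for ref.ask[Boolean](RegisterExecutor(...)) going over the RPC layer
  def askDriver(executorId: String): Future[Boolean] = Future {
    println(s"driver: registering executor $executorId")
    true
  }

  // stands in for self.send(...) eventually being delivered to receive
  def receive(msg: Message): Unit = msg match {
    case RegisteredExecutor => println("backend: creating the Executor object")
  }

  def main(args: Array[String]): Unit = {
    askDriver("1").onComplete {
      case Success(true)  => receive(RegisteredExecutor)
      case Success(false) => println("backend: driver refused the registration")
      case Failure(e)     => println(s"backend: cannot register with driver: $e")
    }
    Thread.sleep(500)   // keep the JVM alive long enough for the callback to run
  }
}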
-----------------------------------------------------------
1.1 Registering the endpoint

override def setupEndpoint(name: String, endpoint: RpcEndpoint): RpcEndpointRef = {
  dispatcher.registerRpcEndpoint(name, endpoint)  // 1.1.1
}
-----------------------------------------------------------
1.1.1 Registering the endpoint with the dispatcher

def registerRpcEndpoint(name: String, endpoint: RpcEndpoint): NettyRpcEndpointRef = {
  val addr = RpcEndpointAddress(nettyEnv.address, name)
  val endpointRef = new NettyRpcEndpointRef(nettyEnv.conf, addr, nettyEnv)
  synchronized {
     var messageLoop: MessageLoop = null
      try {
        messageLoop = endpoint match {
          case e: IsolatedRpcEndpoint =>
            new DedicatedMessageLoop(name, e, this)  // 1.1.1.1 isolated endpoints get their own DedicatedMessageLoop
          case _ =>
            sharedLoop.register(name, endpoint)
            sharedLoop
        }
        endpoints.put(name, messageLoop)
      }
  }
}
-----------------------------------------------------------
1.1.1.1 DedicatedMessageLoop and its Inbox

private class DedicatedMessageLoop(
    name: String,
    endpoint: IsolatedRpcEndpoint,
    dispatcher: Dispatcher)
  extends MessageLoop(dispatcher) {
  // the inbox that queues messages for this endpoint
  private val inbox = new Inbox(name, endpoint)
  // the thread pool that drains the inbox
  override protected val threadpool = if (endpoint.threadCount() > 1) {
    ThreadUtils.newDaemonCachedThreadPool(s"dispatcher-$name", endpoint.threadCount())
  } else {
    ThreadUtils.newDaemonSingleThreadExecutor(s"dispatcher-$name")
  }
}
-----------------------------------------------------------
private[netty] class Inbox(val endpointName: String, val endpoint: RpcEndpoint)
  extends Logging {
  // the inbox enqueues an OnStart message for its own endpoint; this is what triggers onStart
  inbox.synchronized {
    messages.add(OnStart)
  }
}
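
A toy version of the inbox idea (not Spark code): every endpoint owns a queue that is created with OnStart already in it, and a dedicated dispatcher thread drains the queue, which is why onStart always runs before any other message:

import java.util.concurrent.LinkedBlockingQueue

object MiniInbox {
  sealed trait InboxMessage
  case object OnStart extends InboxMessage
  final case class Rpc(body: String) extends InboxMessage

  // the queue starts out containing OnStart, just like the Inbox above
  private val messages = new LinkedBlockingQueue[InboxMessage]()
  messages.add(OnStart)

  def main(args: Array[String]): Unit = {
    messages.add(Rpc("RegisteredExecutor"))
    val loop = new Thread(new Runnable {
      override def run(): Unit = {
        var running = true
        while (running) {
          messages.take() match {
            case OnStart   => println("onStart()")                      // always processed first
            case Rpc(body) => println(s"receive($body)"); running = false
          }
        }
      }
    }, "dispatcher-Executor")
    loop.start()
    loop.join()
  }
}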

The lifecycle of an RpcEndpoint: constructor -> onStart -> receive* -> onStop

class SparkContext(config: SparkConf) extends Logging {
  ...
  private var _schedulerBackend: SchedulerBackend = _   // the scheduler backend whose DriverEndpoint receives the executor's registration message
}
-------------------------------------------------------
The cluster implementation of the SchedulerBackend trait:

private[spark]
class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: RpcEnv)
  extends ExecutorAllocationClient with SchedulerBackend with Logging {

  // inner class: the driver-side RPC endpoint
  class DriverEndpoint extends IsolatedRpcEndpoint with Logging {
    override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
        // matches the RegisterExecutor message the executor backend just sent
        case RegisterExecutor(executorId, executorRef, hostname, cores, logUrls,
          attributes, resources, resourceProfileId) =>
          ...
          totalCoreCount.addAndGet(cores)   // add this executor's cores to the total
          totalRegisteredExecutors.addAndGet(1)
          context.reply(true)   // reply true to the executor
    }
  }
}

How resumeDriver is triggered: during SparkContext initialization, the task scheduler's post-start hook is invoked:

_taskScheduler.postStartHook()
-----------------------------------------------------
private[spark] class YarnClusterScheduler(sc: SparkContext) extends YarnScheduler(sc) {
  override def postStartHook(): Unit = {
    ApplicationMaster.sparkContextInitialized(sc) // notify the AM that the SparkContext is ready (this completes sparkContextPromise)
    super.postStartHook()  // then wait for the scheduler backend to become ready
  }
}
-----------------------------------------------------
override def postStartHook(): Unit = {
  waitBackendReady()
}
-----------------------------------------------------
private def waitBackendReady(): Unit = {
  if (backend.isReady) {
    return
  }
  while (!backend.isReady) {
    if (sc.stopped.get) {
      throw new IllegalStateException("Spark context stopped while waiting for backend")
    }
    synchronized {
      this.wait(100)
    }
  }
}

At this point the driver, the executors, and YARN's RM, NM, and AM have all been created successfully, and the application is up and running.
