
Commit 9d668b7

CodingCat authored and rxin committed
[SPARK-9602] remove "Akka/Actor" words from comments
https://issues.apache.org/jira/browse/SPARK-9602

Although we have hidden Akka behind the RPC interface, the Akka/Actor-related comments are still spread everywhere. To make it consistent, we shall remove the "actor"/"akka" words from the comments...

Author: CodingCat <[email protected]>

Closes apache#7936 from CodingCat/SPARK-9602 and squashes the following commits:

e8296a3 [CodingCat] remove actor words from comments
1 parent ab8ee1a, commit 9d668b7

File tree

16 files changed: +27 -31 lines


core/src/main/scala/org/apache/spark/api/python/PythonRDD.scala (+1 -1)

@@ -794,7 +794,7 @@ private class PythonAccumulatorParam(@transient serverHost: String, serverPort:

   /**
    * We try to reuse a single Socket to transfer accumulator updates, as they are all added
-   * by the DAGScheduler's single-threaded actor anyway.
+   * by the DAGScheduler's single-threaded RpcEndpoint anyway.
    */
   @transient var socket: Socket = _

core/src/main/scala/org/apache/spark/deploy/LocalSparkCluster.scala (-4)

@@ -73,12 +73,8 @@ class LocalSparkCluster(
   def stop() {
     logInfo("Shutting down local Spark cluster.")
     // Stop the workers before the master so they don't get upset that it disconnected
-    // TODO: In Akka 2.1.x, ActorSystem.awaitTermination hangs when you have remote actors!
-    // This is unfortunate, but for now we just comment it out.
     workerRpcEnvs.foreach(_.shutdown())
-    // workerActorSystems.foreach(_.awaitTermination())
     masterRpcEnvs.foreach(_.shutdown())
-    // masterActorSystems.foreach(_.awaitTermination())
     masterRpcEnvs.clear()
     workerRpcEnvs.clear()
   }

core/src/main/scala/org/apache/spark/deploy/client/AppClient.scala (+1 -1)

@@ -257,7 +257,7 @@ private[spark] class AppClient(
   }

   def start() {
-    // Just launch an actor; it will call back into the listener.
+    // Just launch an rpcEndpoint; it will call back into the listener.
     endpoint = rpcEnv.setupEndpoint("AppClient", new ClientEndpoint(rpcEnv))
   }
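For readers unfamiliar with the post-Akka setup, a rough spark-shell-style sketch of the pattern start() relies on, registering an endpoint with an RpcEnv instead of launching an actor. This is not part of the commit: the system name, port, and endpoint name are made up, the RpcEnv.create/uriOf/setupEndpoint calls are taken from diffs elsewhere in this commit, and WorkerWatcher is private[spark], so this only compiles inside the spark package:

    import org.apache.spark.{SecurityManager, SparkConf}
    import org.apache.spark.deploy.worker.WorkerWatcher
    import org.apache.spark.rpc.{RpcAddress, RpcEnv}

    val conf = new SparkConf()
    // The RpcEnv takes the place of the old ActorSystem.
    val rpcEnv = RpcEnv.create("sketch", "localhost", 0, conf, new SecurityManager(conf))
    // setupEndpoint takes the place of actorOf and returns an RpcEndpointRef;
    // WorkerWatcher is one of the RpcEndpoints touched by this commit.
    val workerUrl = rpcEnv.uriOf("sketch", RpcAddress("localhost", 7077), "Worker")
    val watcherRef = rpcEnv.setupEndpoint("worker-watcher", new WorkerWatcher(rpcEnv, workerUrl))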

core/src/main/scala/org/apache/spark/deploy/master/LeaderElectionAgent.scala (+3 -3)

@@ -26,7 +26,7 @@ import org.apache.spark.annotation.DeveloperApi
  */
 @DeveloperApi
 trait LeaderElectionAgent {
-  val masterActor: LeaderElectable
+  val masterInstance: LeaderElectable
   def stop() {} // to avoid noops in implementations.
 }

@@ -37,7 +37,7 @@ trait LeaderElectable {
 }

 /** Single-node implementation of LeaderElectionAgent -- we're initially and always the leader. */
-private[spark] class MonarchyLeaderAgent(val masterActor: LeaderElectable)
+private[spark] class MonarchyLeaderAgent(val masterInstance: LeaderElectable)
   extends LeaderElectionAgent {
-  masterActor.electedLeader()
+  masterInstance.electedLeader()
 }
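For context, a minimal sketch (not part of the commit; SketchMaster is a hypothetical class) of how a LeaderElectable plugs into MonarchyLeaderAgent. The electedLeader()/revokedLeadership() callbacks are the ones visible in the ZooKeeperLeaderElectionAgent diff below, and the agent elects its masterInstance as soon as it is constructed; note MonarchyLeaderAgent is private[spark], so this only works from within the spark package:

    import org.apache.spark.deploy.master.{LeaderElectable, MonarchyLeaderAgent}

    // Hypothetical LeaderElectable that just logs the two leadership callbacks.
    class SketchMaster extends LeaderElectable {
      override def electedLeader(): Unit = println("elected leader")
      override def revokedLeadership(): Unit = println("leadership revoked")
    }

    // MonarchyLeaderAgent calls masterInstance.electedLeader() in its body,
    // so constructing it prints "elected leader" immediately.
    val agent = new MonarchyLeaderAgent(new SketchMaster)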

core/src/main/scala/org/apache/spark/deploy/master/MasterMessages.scala (+1 -1)

@@ -38,5 +38,5 @@ private[master] object MasterMessages {

   case object BoundPortsRequest

-  case class BoundPortsResponse(actorPort: Int, webUIPort: Int, restPort: Option[Int])
+  case class BoundPortsResponse(rpcEndpointPort: Int, webUIPort: Int, restPort: Option[Int])
 }

core/src/main/scala/org/apache/spark/deploy/master/ZooKeeperLeaderElectionAgent.scala (+3 -3)

@@ -22,7 +22,7 @@ import org.apache.curator.framework.CuratorFramework
 import org.apache.curator.framework.recipes.leader.{LeaderLatchListener, LeaderLatch}
 import org.apache.spark.deploy.SparkCuratorUtil

-private[master] class ZooKeeperLeaderElectionAgent(val masterActor: LeaderElectable,
+private[master] class ZooKeeperLeaderElectionAgent(val masterInstance: LeaderElectable,
     conf: SparkConf) extends LeaderLatchListener with LeaderElectionAgent with Logging {

   val WORKING_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark") + "/leader_election"

@@ -73,10 +73,10 @@ private[master] class ZooKeeperLeaderElectionAgent(val masterActor: LeaderElecta
   private def updateLeadershipStatus(isLeader: Boolean) {
     if (isLeader && status == LeadershipStatus.NOT_LEADER) {
       status = LeadershipStatus.LEADER
-      masterActor.electedLeader()
+      masterInstance.electedLeader()
     } else if (!isLeader && status == LeadershipStatus.LEADER) {
       status = LeadershipStatus.NOT_LEADER
-      masterActor.revokedLeadership()
+      masterInstance.revokedLeadership()
     }
   }

core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala (+4 -3)

@@ -228,7 +228,7 @@ private[deploy] class Worker(
   /**
    * Re-register with the master because a network failure or a master failure has occurred.
    * If the re-registration attempt threshold is exceeded, the worker exits with error.
-   * Note that for thread-safety this should only be called from the actor.
+   * Note that for thread-safety this should only be called from the rpcEndpoint.
   */
   private def reregisterWithMaster(): Unit = {
     Utils.tryOrExit {

@@ -365,7 +365,8 @@ private[deploy] class Worker(
       if (connected) { sendToMaster(Heartbeat(workerId, self)) }

     case WorkDirCleanup =>
-      // Spin up a separate thread (in a future) to do the dir cleanup; don't tie up worker actor
+      // Spin up a separate thread (in a future) to do the dir cleanup; don't tie up worker
+      // rpcEndpoint.
       // Copy ids so that it can be used in the cleanup thread.
       val appIds = executors.values.map(_.appId).toSet
       val cleanupFuture = concurrent.future {

@@ -684,7 +685,7 @@ private[deploy] object Worker extends Logging {
       workerNumber: Option[Int] = None,
       conf: SparkConf = new SparkConf): RpcEnv = {

-    // The LocalSparkCluster runs multiple local sparkWorkerX actor systems
+    // The LocalSparkCluster runs multiple local sparkWorkerX RPC Environments
     val systemName = SYSTEM_NAME + workerNumber.map(_.toString).getOrElse("")
     val securityMgr = new SecurityManager(conf)
     val rpcEnv = RpcEnv.create(systemName, host, port, conf, securityMgr)

core/src/main/scala/org/apache/spark/deploy/worker/WorkerWatcher.scala (+2 -2)

@@ -43,7 +43,7 @@ private[spark] class WorkerWatcher(override val rpcEnv: RpcEnv, workerUrl: Strin
   private[deploy] def setTesting(testing: Boolean) = isTesting = testing
   private var isTesting = false

-  // Lets us filter events only from the worker's actor system
+  // Lets filter events only from the worker's rpc system
   private val expectedAddress = RpcAddress.fromURIString(workerUrl)
   private def isWorker(address: RpcAddress) = expectedAddress == address

@@ -62,7 +62,7 @@ private[spark] class WorkerWatcher(override val rpcEnv: RpcEnv, workerUrl: Strin
   override def onDisconnected(remoteAddress: RpcAddress): Unit = {
     if (isWorker(remoteAddress)) {
       // This log message will never be seen
-      logError(s"Lost connection to worker actor $workerUrl. Exiting.")
+      logError(s"Lost connection to worker rpc endpoint $workerUrl. Exiting.")
       exitNonZero()
     }
   }

core/src/main/scala/org/apache/spark/rpc/RpcEndpointRef.scala (+1 -1)

@@ -100,7 +100,7 @@ private[spark] abstract class RpcEndpointRef(@transient conf: SparkConf)
       val future = ask[T](message, timeout)
       val result = timeout.awaitResult(future)
       if (result == null) {
-        throw new SparkException("Actor returned null")
+        throw new SparkException("RpcEndpoint returned null")
       }
       return result
     } catch {
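A hedged caller-side sketch (not from the commit) of what this helper wraps: ask returns a Future of the typed reply, and a null reply surfaces as a SparkException rather than leaking back to the caller. BoundPortsRequest/BoundPortsResponse are the master messages shown above, masterRef is an assumed already-obtained RpcEndpointRef, and MasterMessages is private[master], so this is illustrative only:

    import scala.concurrent.Await
    import scala.concurrent.duration._
    import org.apache.spark.deploy.master.MasterMessages.{BoundPortsRequest, BoundPortsResponse}

    // masterRef: RpcEndpointRef is assumed to have been obtained via rpcEnv.setupEndpoint(...).
    // ask[T] types the eventual reply; Await here stands in for the timeout-aware
    // awaitResult used in the method above.
    val future = masterRef.ask[BoundPortsResponse](BoundPortsRequest)
    val ports = Await.result(future, 10.seconds)
    println(s"rpc port ${ports.rpcEndpointPort}, web UI port ${ports.webUIPort}")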

core/src/main/scala/org/apache/spark/scheduler/OutputCommitCoordinator.scala (+1 -1)

@@ -162,7 +162,7 @@ private[spark] class OutputCommitCoordinator(conf: SparkConf, isDriver: Boolean)

 private[spark] object OutputCommitCoordinator {

-  // This actor is used only for RPC
+  // This endpoint is used only for RPC
   private[spark] class OutputCommitCoordinatorEndpoint(
       override val rpcEnv: RpcEnv, outputCommitCoordinator: OutputCommitCoordinator)
     extends RpcEndpoint with Logging {
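For readers coming from the actor model, a minimal sketch (assumed shape, pieced together from the RpcEndpoint signatures visible in this commit; EchoEndpoint is hypothetical) of what such an RPC-only endpoint looks like; receiveAndReply plays the role of an actor's receive block for ask-style messages:

    import org.apache.spark.rpc.{RpcCallContext, RpcEndpoint, RpcEnv}

    // Hypothetical endpoint: replies to every ask with the same message (an echo).
    class EchoEndpoint(override val rpcEnv: RpcEnv) extends RpcEndpoint {
      override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = {
        case msg => context.reply(msg)
      }
    }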

core/src/main/scala/org/apache/spark/scheduler/cluster/ExecutorData.scala (+1 -1)

@@ -22,7 +22,7 @@ import org.apache.spark.rpc.{RpcEndpointRef, RpcAddress}
 /**
  * Grouping of data for an executor used by CoarseGrainedSchedulerBackend.
  *
- * @param executorEndpoint The ActorRef representing this executor
+ * @param executorEndpoint The RpcEndpointRef representing this executor
  * @param executorAddress The network address of this executor
  * @param executorHost The hostname that this executor is running on
  * @param freeCores The current number of cores available for work on the executor

core/src/main/scala/org/apache/spark/util/IdGenerator.scala (+3 -3)

@@ -22,10 +22,10 @@ import java.util.concurrent.atomic.AtomicInteger
 /**
  * A util used to get a unique generation ID. This is a wrapper around Java's
  * AtomicInteger. An example usage is in BlockManager, where each BlockManager
- * instance would start an Akka actor and we use this utility to assign the Akka
- * actors unique names.
+ * instance would start an RpcEndpoint and we use this utility to assign the RpcEndpoints'
+ * unique names.
  */
 private[spark] class IdGenerator {
-  private var id = new AtomicInteger
+  private val id = new AtomicInteger
   def next: Int = id.incrementAndGet
 }
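A short usage sketch matching the scaladoc above (the endpoint-name string is illustrative, not necessarily the exact one BlockManager uses, and IdGenerator is private[spark]):

    import org.apache.spark.util.IdGenerator

    val idGenerator = new IdGenerator
    // Each call hands out the next unique suffix, e.g. for naming RpcEndpoints.
    val endpointName = "BlockManagerEndpoint" + idGenerator.next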

core/src/test/scala/org/apache/spark/deploy/master/CustomRecoveryModeFactory.scala (+2 -2)

@@ -99,7 +99,7 @@ object CustomPersistenceEngine {
   @volatile var lastInstance: Option[CustomPersistenceEngine] = None
 }

-class CustomLeaderElectionAgent(val masterActor: LeaderElectable) extends LeaderElectionAgent {
-  masterActor.electedLeader()
+class CustomLeaderElectionAgent(val masterInstance: LeaderElectable) extends LeaderElectionAgent {
+  masterInstance.electedLeader()
 }

core/src/test/scala/org/apache/spark/deploy/worker/WorkerWatcherSuite.scala (+2 -3)

@@ -38,12 +38,11 @@ class WorkerWatcherSuite extends SparkFunSuite {
     val conf = new SparkConf()
     val rpcEnv = RpcEnv.create("test", "localhost", 12345, conf, new SecurityManager(conf))
     val targetWorkerUrl = rpcEnv.uriOf("test", RpcAddress("1.2.3.4", 1234), "Worker")
-    val otherAddress = "akka://[email protected]:1234/user/OtherActor"
-    val otherAkkaAddress = RpcAddress("4.3.2.1", 1234)
+    val otherRpcAddress = RpcAddress("4.3.2.1", 1234)
     val workerWatcher = new WorkerWatcher(rpcEnv, targetWorkerUrl)
     workerWatcher.setTesting(testing = true)
     rpcEnv.setupEndpoint("worker-watcher", workerWatcher)
-    workerWatcher.onDisconnected(otherAkkaAddress)
+    workerWatcher.onDisconnected(otherRpcAddress)
     assert(!workerWatcher.isShutDown)
     rpcEnv.shutdown()
   }

project/MimaExcludes.scala (+1 -1)

@@ -182,7 +182,7 @@ object MimaExcludes {
       ProblemFilters.exclude[IncompatibleResultTypeProblem](
         "org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast"),
       ProblemFilters.exclude[MissingClassProblem](
-        "org.apache.spark.scheduler.OutputCommitCoordinator$OutputCommitCoordinatorActor")
+        "org.apache.spark.scheduler.OutputCommitCoordinator$OutputCommitCoordinatorEndpoint")
     ) ++ Seq(
       // SPARK-4655 - Making Stage an Abstract class broke binary compatility even though
       // the stage class is defined as private[spark]

repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkILoop.scala (+1 -1)

@@ -981,7 +981,7 @@ class SparkILoop(
     // which spins off a separate thread, then print the prompt and try
     // our best to look ready. The interlocking lazy vals tend to
     // inter-deadlock, so we break the cycle with a single asynchronous
-    // message to an actor.
+    // message to an rpcEndpoint.
     if (isAsync) {
       intp initialize initializedCallback()
       createAsyncListener() // listens for signal to run postInitialization
