
Commit 9aa0957

markgrover authored and pwendell committed
[SPARK-1150] fix repo location in create script
https://spark-project.atlassian.net/browse/SPARK-1150 fix the repo location in create_release script

Author: Mark Grover <[email protected]>

Closes apache#48 from CodingCat/script_fixes and squashes the following commits:

01f4bf7 [Mark Grover] Fixing some nitpicks
d2244d4 [Mark Grover] SPARK-676: Abbreviation in SPARK_MEM but not in SPARK_WORKER_MEMORY
1 parent 556c566 · commit 9aa0957

File tree

3 files changed (+11, -5 lines)

conf/spark-env.sh.template

+1 -1

@@ -15,7 +15,7 @@
 # - SPARK_MASTER_IP, to bind the master to a different IP address or hostname
 # - SPARK_MASTER_PORT / SPARK_MASTER_WEBUI_PORT, to use non-default ports
 # - SPARK_WORKER_CORES, to set the number of cores to use on this machine
-# - SPARK_WORKER_MEMORY, to set how much memory to use (e.g. 1000m, 2g)
+# - SPARK_WORKER_MEM, to set how much memory to use (e.g. 1000m, 2g)
 # - SPARK_WORKER_PORT / SPARK_WORKER_WEBUI_PORT
 # - SPARK_WORKER_INSTANCES, to set the number of worker processes per node
 # - SPARK_WORKER_DIR, to set the working directory of worker processes
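After this rename, updating an existing deployment means editing the variable name in conf/spark-env.sh. A minimal sketch of the relevant line, assuming a hypothetical 2g cap:

    # conf/spark-env.sh (copied from spark-env.sh.template and sourced at launch)
    export SPARK_WORKER_MEM=2g  # total memory Spark applications may use on this node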

core/src/main/scala/org/apache/spark/deploy/worker/WorkerArguments.scala

+8 -2

@@ -18,13 +18,15 @@
 package org.apache.spark.deploy.worker
 
 import java.lang.management.ManagementFactory
+import org.apache.spark.Logging
 
 import org.apache.spark.util.{IntParam, MemoryParam, Utils}
 
 /**
  * Command-line parser for the master.
  */
-private[spark] class WorkerArguments(args: Array[String]) {
+private[spark] class WorkerArguments(args: Array[String]) extends Logging {
+  initLogging()
   var host = Utils.localHostName()
   var port = 0
   var webUiPort = 8081
@@ -40,9 +42,13 @@ private[spark] class WorkerArguments(args: Array[String]) {
   if (System.getenv("SPARK_WORKER_CORES") != null) {
     cores = System.getenv("SPARK_WORKER_CORES").toInt
   }
-  if (System.getenv("SPARK_WORKER_MEMORY") != null) {
+  if (System.getenv("SPARK_WORKER_MEM") != null) {
+    memory = Utils.memoryStringToMb(System.getenv("SPARK_WORKER_MEM"))
+  } else if (System.getenv("SPARK_WORKER_MEMORY") != null) {
+    logWarning("SPARK_WORKER_MEMORY is deprecated. Please use SPARK_WORKER_MEM instead")
     memory = Utils.memoryStringToMb(System.getenv("SPARK_WORKER_MEMORY"))
   }
+
   if (System.getenv("SPARK_WORKER_WEBUI_PORT") != null) {
     webUiPort = System.getenv("SPARK_WORKER_WEBUI_PORT").toInt
   }
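Both branches funnel through Utils.memoryStringToMb, which converts a string such as 1000m or 2g into a megabyte count. The standalone sketch below mirrors the patched precedence (SPARK_WORKER_MEM first, then the deprecated SPARK_WORKER_MEMORY with a warning); its memoryStringToMb is an assumed approximation for illustration, not Spark's actual implementation:

    // Sketch only: WorkerMemorySketch and this memoryStringToMb are
    // hypothetical stand-ins for org.apache.spark.util.Utils.memoryStringToMb.
    object WorkerMemorySketch {
      def memoryStringToMb(str: String): Int = {
        val lower = str.toLowerCase
        if (lower.endsWith("g")) lower.dropRight(1).toInt * 1024
        else if (lower.endsWith("m")) lower.dropRight(1).toInt
        else if (lower.endsWith("k")) (lower.dropRight(1).toLong / 1024).toInt
        else (lower.toLong / (1024 * 1024)).toInt // assume bare numbers are bytes
      }

      def main(args: Array[String]): Unit = {
        // New variable wins; the deprecated variable is a warned fallback.
        val memoryMb = sys.env.get("SPARK_WORKER_MEM")
          .orElse {
            sys.env.get("SPARK_WORKER_MEMORY").map { v =>
              System.err.println(
                "SPARK_WORKER_MEMORY is deprecated. Please use SPARK_WORKER_MEM instead")
              v
            }
          }
          .map(memoryStringToMb)
        println(s"resolved worker memory (MB): $memoryMb")
      }
    }

With SPARK_WORKER_MEMORY=2g set and SPARK_WORKER_MEM unset, this prints the deprecation warning and resolves to 2048 MB, matching the fallback path in the patch.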

docs/spark-standalone.md

+2 -2

@@ -104,8 +104,8 @@ You can optionally configure the cluster further by setting environment variable
     <td>Total number of cores to allow Spark applications to use on the machine (default: all available cores).</td>
   </tr>
   <tr>
-    <td><code>SPARK_WORKER_MEMORY</code></td>
-    <td>Total amount of memory to allow Spark applications to use on the machine, e.g. <code>1000m</code>, <code>2g</code> (default: total memory minus 1 GB); note that each application's <i>individual</i> memory is configured using its <code>spark.executor.memory</code> property.</td>
+    <td><code>SPARK_WORKER_MEM</code></td>
+    <td>Total amount of memory to allow Spark applications to use on the machine, e.g. <code>1000m</code>, <code>2g</code> (default: total memory minus 1 GB); note that each application's <i>individual</i> memory is configured using its <code>spark.executor.memory</code> property. The old variable <code>SPARK_WORKER_MEMORY</code> has been deprecated.</td>
   </tr>
   <tr>
     <td><code>SPARK_WORKER_WEBUI_PORT</code></td>
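The table's distinction matters in practice: SPARK_WORKER_MEM caps a whole machine, while each application asks for its own share via spark.executor.memory. A hedged Scala sketch of the application side (the object and app name are hypothetical; the SparkConf calls follow the public API of this era):

    import org.apache.spark.{SparkConf, SparkContext}

    // Hypothetical example application: its 1g request must fit, together with
    // any other running applications, under the worker's SPARK_WORKER_MEM cap.
    object ExecutorMemoryExample {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
          .setAppName("executor-memory-example")
          .set("spark.executor.memory", "1g") // per-application, not per-machine
        val sc = new SparkContext(conf)
        // ... run jobs ...
        sc.stop()
      }
    }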
