@@ -36,15 +36,15 @@ If you wish to use pre-built docker images, you may use the images published in
 <tr><th>Component</th><th>Image</th></tr>
 <tr>
   <td>Spark Driver Image</td>
-  <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.2.0</code></td>
+  <td><code>kubespark/spark-driver:v2.2.0-kubernetes-0.3.0</code></td>
 </tr>
 <tr>
   <td>Spark Executor Image</td>
-  <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.2.0</code></td>
+  <td><code>kubespark/spark-executor:v2.2.0-kubernetes-0.3.0</code></td>
 </tr>
 <tr>
   <td>Spark Initialization Image</td>
-  <td><code>kubespark/spark-init:v2.1.0-kubernetes-0.2.0</code></td>
+  <td><code>kubespark/spark-init:v2.2.0-kubernetes-0.3.0</code></td>
 </tr>
 </table>

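These pre-built images appear to be hosted on Docker Hub under the `kubespark` organization (an assumption based on the image prefix); pulling one ahead of time is a quick way to verify the new tag resolves:

    docker pull kubespark/spark-driver:v2.2.0-kubernetes-0.3.0
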
@@ -80,9 +80,9 @@ are set up as described above:
   --kubernetes-namespace default \
   --conf spark.executor.instances=5 \
   --conf spark.app.name=spark-pi \
-  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
   local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar

 The Spark master, specified either via passing the `--master` command line argument to `spark-submit` or by setting
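For reference, the Kubernetes master URL mentioned above takes a `k8s://`-prefixed form; the host and port below are placeholders for your API server:

    --master k8s://https://<k8s-apiserver-host>:<k8s-apiserver-port>
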
@@ -129,9 +129,9 @@ and then you can compute the value of Pi as follows:
   --kubernetes-namespace default \
   --conf spark.executor.instances=5 \
   --conf spark.app.name=spark-pi \
-  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
   --conf spark.kubernetes.resourceStagingServer.uri=http://<address-of-any-cluster-node>:31000 \
   examples/jars/spark_examples_2.11-2.2.0.jar

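Port 31000 above is the staging server's NodePort; it can be confirmed with kubectl. The service name below is an assumption that depends on how the staging server was deployed:

    kubectl get svc spark-resource-staging-service \
      -o jsonpath='{.spec.ports[0].nodePort}'
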
@@ -170,9 +170,9 @@ If our local proxy were listening on port 8001, we would have our submission loo
   --kubernetes-namespace default \
   --conf spark.executor.instances=5 \
   --conf spark.app.name=spark-pi \
-  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
   local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar

 Communication between Spark and Kubernetes clusters is performed using the fabric8 kubernetes-client library.
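The local proxy referenced in this example is typically started with `kubectl proxy`, which listens on port 8001 by default; the loopback master URL below is a sketch of how the submission would then address the API server through the proxy:

    kubectl proxy
    # then, in the spark-submit invocation:
    # --master k8s://http://127.0.0.1:8001
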
@@ -220,7 +220,7 @@ service because there may be multiple shuffle service instances running in a clu
 a way to target a particular shuffle service.

 For example, if the shuffle service we want to use is in the default namespace, and
-has pods with labels `app=spark-shuffle-service` and `spark-version=2.1.0`, we can
+has pods with labels `app=spark-shuffle-service` and `spark-version=2.2.0`, we can
 use those tags to target that particular shuffle service at job launch time. In order to run a job with dynamic allocation enabled,
 the command may then look like the following:

@@ -235,7 +235,7 @@ the command may then look like the following:
   --conf spark.dynamicAllocation.enabled=true \
   --conf spark.shuffle.service.enabled=true \
   --conf spark.kubernetes.shuffle.namespace=default \
-  --conf spark.kubernetes.shuffle.labels="app=spark-shuffle-service,spark-version=2.1.0" \
+  --conf spark.kubernetes.shuffle.labels="app=spark-shuffle-service,spark-version=2.2.0" \
   local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar 10 400000 2

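As a sanity check, the shuffle service pods matching these labels can be listed with a standard label selector (namespace and labels as in the example above):

    kubectl get pods -n default \
      -l "app=spark-shuffle-service,spark-version=2.2.0"
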
 ## Advanced
@@ -312,9 +312,9 @@ communicate with the resource staging server over TLS. The trustStore can be set
   --kubernetes-namespace default \
   --conf spark.executor.instances=5 \
   --conf spark.app.name=spark-pi \
-  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
-  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
+  --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.2.0-kubernetes-0.3.0 \
+  --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.2.0-kubernetes-0.3.0 \
   --conf spark.kubernetes.resourceStagingServer.uri=https://<address-of-any-cluster-node>:31000 \
   --conf spark.ssl.kubernetes.resourceStagingServer.enabled=true \
   --conf spark.ssl.kubernetes.resourceStagingServer.clientCertPem=/home/myuser/cert.pem \
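As an end note, the trustStore referenced in this last hunk's context can be assembled from the staging server's certificate using the JDK's keytool; the certificate path, alias, store location, and password below are illustrative, not taken from the docs:

    keytool -importcert -noprompt \
      -alias resource-staging-server \
      -file /path/to/staging-server-cert.pem \
      -keystore /home/myuser/truststore.jks \
      -storepass changeit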