From 176c10f7ad20c013703a8d219543e381e5f9746a Mon Sep 17 00:00:00 2001 From: Travis Nielsen Date: Thu, 3 Feb 2022 15:46:30 -0700 Subject: [PATCH] build: set release version to v1.8.4 With the patch release we update the example manifests to v1.8.4. At the same time the upgrade tests are updated to use the latest v1.7.11. Signed-off-by: Travis Nielsen --- Documentation/ceph-monitoring.md | 2 +- Documentation/ceph-upgrade.md | 38 +++++++++---------- Documentation/quickstart.md | 2 +- deploy/examples/direct-mount.yaml | 2 +- deploy/examples/images.txt | 2 +- deploy/examples/operator-openshift.yaml | 2 +- deploy/examples/operator.yaml | 2 +- deploy/examples/osd-purge.yaml | 2 +- deploy/examples/toolbox-job.yaml | 4 +- deploy/examples/toolbox.yaml | 2 +- .../installer/ceph_manifests_previous.go | 2 +- 11 files changed, 30 insertions(+), 30 deletions(-) diff --git a/Documentation/ceph-monitoring.md b/Documentation/ceph-monitoring.md index 4625b733dc96..7e4d92e6d924 100644 --- a/Documentation/ceph-monitoring.md +++ b/Documentation/ceph-monitoring.md @@ -38,7 +38,7 @@ With the Prometheus operator running, we can create a service monitor that will From the root of your locally cloned Rook repo, go the monitoring directory: ```console -$ git clone --single-branch --branch v1.8.3 https://github.com/rook/rook.git +$ git clone --single-branch --branch v1.8.4 https://github.com/rook/rook.git cd rook/deploy/examples/monitoring ``` diff --git a/Documentation/ceph-upgrade.md b/Documentation/ceph-upgrade.md index be1a71589eb3..bcacae9e9b7c 100644 --- a/Documentation/ceph-upgrade.md +++ b/Documentation/ceph-upgrade.md @@ -71,12 +71,12 @@ With this upgrade guide, there are a few notes to consider: Unless otherwise noted due to extenuating requirements, upgrades from one patch release of Rook to another are as simple as updating the common resources and the image of the Rook operator. For -example, when Rook v1.8.3 is released, the process of updating from v1.8.0 is as simple as running +example, when Rook v1.8.4 is released, the process of updating from v1.8.0 is as simple as running the following: First get the latest common resources manifests that contain the latest changes for Rook v1.8. ```sh -git clone --single-branch --depth=1 --branch v1.8.3 https://github.com/rook/rook.git +git clone --single-branch --depth=1 --branch v1.8.4 https://github.com/rook/rook.git cd rook/deploy/examples ``` @@ -87,7 +87,7 @@ section for instructions on how to change the default namespaces in `common.yaml Then apply the latest changes from v1.8 and update the Rook Operator image. ```console kubectl apply -f common.yaml -f crds.yaml -kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.3 +kubectl -n rook-ceph set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.4 ``` As exemplified above, it is a good practice to update Rook-Ceph common resources from the example @@ -265,8 +265,8 @@ Any pod that is using a Rook volume should also remain healthy: ## Rook Operator Upgrade Process -In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.7.8` to -the version `v1.8.3`. This upgrade should work from any official patch release of Rook v1.7 to any +In the examples given in this guide, we will be upgrading a live Rook cluster running `v1.7.11` to +the version `v1.8.4`. This upgrade should work from any official patch release of Rook v1.7 to any official patch release of v1.8. 
**Rook release from `master` are expressly unsupported.** It is strongly recommended that you use @@ -291,7 +291,7 @@ by the Operator. Also update the Custom Resource Definitions (CRDs). Get the latest common resources manifests that contain the latest changes. ```sh -git clone --single-branch --depth=1 --branch v1.8.3 https://github.com/rook/rook.git +git clone --single-branch --depth=1 --branch v1.8.4 https://github.com/rook/rook.git cd rook/deploy/examples ``` @@ -343,7 +343,7 @@ The largest portion of the upgrade is triggered when the operator's image is upd When the operator is updated, it will proceed to update all of the Ceph daemons. ```sh -kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.3 +kubectl -n $ROOK_OPERATOR_NAMESPACE set image deploy/rook-ceph-operator rook-ceph-operator=rook/ceph:v1.8.4 ``` #### Admission controller @@ -377,18 +377,18 @@ watch --exec kubectl -n $ROOK_CLUSTER_NAMESPACE get deployments -l rook_cluster= ``` As an example, this cluster is midway through updating the OSDs. When all deployments report `1/1/1` -availability and `rook-version=v1.8.3`, the Ceph cluster's core components are fully updated. +availability and `rook-version=v1.8.4`, the Ceph cluster's core components are fully updated. >``` >Every 2.0s: kubectl -n rook-ceph get deployment -o j... > ->rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.8.3 ->rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.8.3 ->rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.8.3 ->rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.8.3 ->rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.8.3 ->rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.7.8 ->rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.7.8 +>rook-ceph-mgr-a req/upd/avl: 1/1/1 rook-version=v1.8.4 +>rook-ceph-mon-a req/upd/avl: 1/1/1 rook-version=v1.8.4 +>rook-ceph-mon-b req/upd/avl: 1/1/1 rook-version=v1.8.4 +>rook-ceph-mon-c req/upd/avl: 1/1/1 rook-version=v1.8.4 +>rook-ceph-osd-0 req/upd/avl: 1// rook-version=v1.8.4 +>rook-ceph-osd-1 req/upd/avl: 1/1/1 rook-version=v1.7.11 +>rook-ceph-osd-2 req/upd/avl: 1/1/1 rook-version=v1.7.11 >``` An easy check to see if the upgrade is totally finished is to check that there is only one @@ -397,15 +397,15 @@ An easy check to see if the upgrade is totally finished is to check that there i ```console # kubectl -n $ROOK_CLUSTER_NAMESPACE get deployment -l rook_cluster=$ROOK_CLUSTER_NAMESPACE -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq This cluster is not yet finished: - rook-version=v1.7.8 - rook-version=v1.8.3 + rook-version=v1.7.11 + rook-version=v1.8.4 This cluster is finished: - rook-version=v1.8.3 + rook-version=v1.8.4 ``` ### **5. Verify the updated cluster** -At this point, your Rook operator should be running version `rook/ceph:v1.8.3`. +At this point, your Rook operator should be running version `rook/ceph:v1.8.4`. Verify the Ceph cluster's health using the [health verification section](#health-verification). diff --git a/Documentation/quickstart.md b/Documentation/quickstart.md index c09242c315cd..6d20e1fe9092 100644 --- a/Documentation/quickstart.md +++ b/Documentation/quickstart.md @@ -34,7 +34,7 @@ In order to configure the Ceph storage cluster, at least one of these local stor A simple Rook cluster can be created with the following kubectl commands and [example manifests](https://github.com/rook/rook/blob/{{ branchName }}/deploy/examples). 
```console -$ git clone --single-branch --branch v1.8.3 https://github.com/rook/rook.git +$ git clone --single-branch --branch v1.8.4 https://github.com/rook/rook.git cd rook/deploy/examples kubectl create -f crds.yaml -f common.yaml -f operator.yaml kubectl create -f cluster.yaml diff --git a/deploy/examples/direct-mount.yaml b/deploy/examples/direct-mount.yaml index b65a27ef3de0..b9d0df3042ba 100644 --- a/deploy/examples/direct-mount.yaml +++ b/deploy/examples/direct-mount.yaml @@ -18,7 +18,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: rook-direct-mount - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 command: ["/bin/bash"] args: ["-m", "-c", "/usr/local/bin/toolbox.sh"] imagePullPolicy: IfNotPresent diff --git a/deploy/examples/images.txt b/deploy/examples/images.txt index 2e54b09490c2..4d5620609aea 100644 --- a/deploy/examples/images.txt +++ b/deploy/examples/images.txt @@ -7,4 +7,4 @@ quay.io/cephcsi/cephcsi:v3.5.1 quay.io/csiaddons/k8s-sidecar:v0.2.1 quay.io/csiaddons/volumereplication-operator:v0.3.0 - rook/ceph:v1.8.3 + rook/ceph:v1.8.4 diff --git a/deploy/examples/operator-openshift.yaml b/deploy/examples/operator-openshift.yaml index 4627006e5dff..1635bed4dce4 100644 --- a/deploy/examples/operator-openshift.yaml +++ b/deploy/examples/operator-openshift.yaml @@ -449,7 +449,7 @@ spec: serviceAccountName: rook-ceph-system containers: - name: rook-ceph-operator - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 args: ["ceph", "operator"] securityContext: runAsNonRoot: true diff --git a/deploy/examples/operator.yaml b/deploy/examples/operator.yaml index b0092c9996ff..628cc77b4187 100644 --- a/deploy/examples/operator.yaml +++ b/deploy/examples/operator.yaml @@ -366,7 +366,7 @@ spec: serviceAccountName: rook-ceph-system containers: - name: rook-ceph-operator - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 args: ["ceph", "operator"] securityContext: runAsNonRoot: true diff --git a/deploy/examples/osd-purge.yaml b/deploy/examples/osd-purge.yaml index 46d32a367ccd..4e6ee63ed801 100644 --- a/deploy/examples/osd-purge.yaml +++ b/deploy/examples/osd-purge.yaml @@ -25,7 +25,7 @@ spec: serviceAccountName: rook-ceph-purge-osd containers: - name: osd-removal - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 # TODO: Insert the OSD ID in the last parameter that is to be removed # The OSD IDs are a comma-separated list. For example: "0" or "0,2". # If you want to preserve the OSD PVCs, set `--preserve-pvc true`. 
diff --git a/deploy/examples/toolbox-job.yaml b/deploy/examples/toolbox-job.yaml index b35c6423c617..cbadd972242b 100644 --- a/deploy/examples/toolbox-job.yaml +++ b/deploy/examples/toolbox-job.yaml @@ -10,7 +10,7 @@ spec: spec: initContainers: - name: config-init - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 command: ["/usr/local/bin/toolbox.sh"] args: ["--skip-watch"] imagePullPolicy: IfNotPresent @@ -32,7 +32,7 @@ spec: mountPath: /etc/rook containers: - name: script - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 volumeMounts: - mountPath: /etc/ceph name: ceph-config diff --git a/deploy/examples/toolbox.yaml b/deploy/examples/toolbox.yaml index 1e86f7399fb6..59ed62e1e0a3 100644 --- a/deploy/examples/toolbox.yaml +++ b/deploy/examples/toolbox.yaml @@ -18,7 +18,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: rook-ceph-tools - image: rook/ceph:v1.8.3 + image: rook/ceph:v1.8.4 command: ["/bin/bash"] args: ["-m", "-c", "/usr/local/bin/toolbox.sh"] imagePullPolicy: IfNotPresent diff --git a/tests/framework/installer/ceph_manifests_previous.go b/tests/framework/installer/ceph_manifests_previous.go index efdc1dd982a0..582e8989b585 100644 --- a/tests/framework/installer/ceph_manifests_previous.go +++ b/tests/framework/installer/ceph_manifests_previous.go @@ -24,7 +24,7 @@ import ( const ( // The version from which the upgrade test will start - Version1_7 = "v1.7.8" + Version1_7 = "v1.7.11" ) // CephManifestsPreviousVersion wraps rook yaml definitions
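
The patch above edits the same `rook/ceph` image tag by hand in eleven files. Below is a minimal sketch of how such a bump could be made and verified in one pass from the repository root; the `OLD`/`NEW` variables, the file selection, and the reliance on GNU `grep`/`sed`/`xargs` are assumptions for illustration, not part of Rook's release tooling or of this patch.

```sh
# Hypothetical helper, not part of the Rook repo: bump the rook/ceph image tag
# and the docs' --branch references from v1.8.3 to v1.8.4, then verify.
OLD=v1.8.3
NEW=v1.8.4

# Rewrite image references in the example manifests and docs (GNU sed; xargs -r
# skips the sed call if grep finds nothing).
grep -rl "rook/ceph:${OLD}" Documentation deploy/examples \
  | xargs -r sed -i "s|rook/ceph:${OLD}|rook/ceph:${NEW}|g"

# The quickstart, monitoring, and upgrade docs clone the repo by tag, so update
# the --branch references as well.
grep -rl -e "--branch ${OLD}" Documentation \
  | xargs -r sed -i "s|--branch ${OLD}|--branch ${NEW}|g"

# Verify nothing still references the old tag.
grep -rn "${OLD}" Documentation deploy/examples \
  && echo "old tag still present" \
  || echo "bump complete"
```

Note that the Go constant in `tests/framework/installer/ceph_manifests_previous.go` (v1.7.8 → v1.7.11) tracks the previous minor release rather than the image tag, so a search-and-replace like the above would not cover it; it is edited separately in this patch.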