
Commit

added files
Wei Min Chan committed Jun 18, 2020
1 parent 45ddc3c commit 90cdfd5
Showing 3 changed files with 141 additions and 0 deletions.
13 changes: 13 additions & 0 deletions Ceph_Rook_EKS/ClusterSetup/rook_ceph/object-user.yaml
@@ -0,0 +1,13 @@
#################################################################################################################
# Create an object store user for access to the s3 endpoint.
# kubectl create -f object-user.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephObjectStoreUser
metadata:
  name: my-user
  namespace: rook-ceph
spec:
  store: my-store
  displayName: "my display name"
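
Once this CephObjectStoreUser is applied, the Rook operator generates S3 credentials for the user and stores them in a Kubernetes secret. A minimal sketch for retrieving them, assuming Rook's usual secret naming of rook-ceph-object-user-<store>-<user> with AccessKey/SecretKey data fields (verify the exact name in your cluster):

# Inspect the generated secret and decode the S3 credentials (secret name assumed)
kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user -o yaml
kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user \
  -o jsonpath='{.data.AccessKey}' | base64 --decode
kubectl -n rook-ceph get secret rook-ceph-object-user-my-store-my-user \
  -o jsonpath='{.data.SecretKey}' | base64 --decode
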
79 changes: 79 additions & 0 deletions Ceph_Rook_EKS/ClusterSetup/rook_ceph/object.yaml
@@ -0,0 +1,79 @@
#################################################################################################################
# Create an object store with settings for replication in a production environment. A minimum of 3 hosts with
# OSDs are required in this example.
# kubectl create -f object.yaml
#################################################################################################################

apiVersion: ceph.rook.io/v1
kind: CephObjectStore
metadata:
  name: my-store
  namespace: rook-ceph
spec:
  # The pool spec used to create the metadata pools. Must use replication.
  metadataPool:
    failureDomain: host
    replicated:
      size: 3
      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
      requireSafeReplicaSize: true
      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #targetSizeRatio: .5
  # The pool spec used to create the data pool. Can use replication or erasure coding.
  dataPool:
    failureDomain: host
    replicated:
      size: 3
      # Disallow setting pool with replica 1, this could lead to data loss without recovery.
      # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
      requireSafeReplicaSize: true
      # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
      # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
      #targetSizeRatio: .5
    # Inline compression mode for the data pool
    # Further reference: https://docs.ceph.com/docs/nautilus/rados/configuration/bluestore-config-ref/#inline-compression
    compressionMode: none
  # Whether to preserve metadata and data pools on object store deletion
  preservePoolsOnDelete: false
  # The gateway service configuration
  gateway:
    # type of the gateway (s3)
    type: s3
    # A reference to the secret in the rook namespace where the ssl certificate is stored
    sslCertificateRef:
    # The port that RGW pods will listen on (http)
    port: 80
    # The port that RGW pods will listen on (https). An ssl certificate is required.
    securePort:
    # The number of pods in the rgw deployment
    instances: 1
    # The affinity rules to apply to the rgw deployment or daemonset.
    placement:
    #  nodeAffinity:
    #    requiredDuringSchedulingIgnoredDuringExecution:
    #      nodeSelectorTerms:
    #      - matchExpressions:
    #        - key: role
    #          operator: In
    #          values:
    #          - rgw-node
    #  topologySpreadConstraints:
    #  tolerations:
    #  - key: rgw-node
    #    operator: Exists
    #  podAffinity:
    #  podAntiAffinity:
    # A key/value list of annotations
    annotations:
    #  key: value
    resources:
    # The requests and limits set here, allow the object store gateway Pod(s) to use half of one CPU core and 1 gigabyte of memory
    #  limits:
    #    cpu: "500m"
    #    memory: "1024Mi"
    #  requests:
    #    cpu: "500m"
    #    memory: "1024Mi"
    # priorityClassName: my-priority-class
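
After kubectl create -f object.yaml, the operator creates the replicated metadata and data pools and starts the RGW deployment defined by the gateway section. A quick verification sketch, assuming Rook's usual rook-ceph-rgw-<store> service naming and app=rook-ceph-rgw pod label (both are conventions, not part of this commit):

# Confirm the object store resource exists and the gateway pod is running
kubectl -n rook-ceph get cephobjectstore my-store
kubectl -n rook-ceph get pod -l app=rook-ceph-rgw

# The in-cluster S3 endpoint is exposed by the gateway service on port 80 (service name assumed)
kubectl -n rook-ceph get svc rook-ceph-rgw-my-store
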
49 changes: 49 additions & 0 deletions Ceph_Rook_EKS/ClusterSetup/rook_ceph/toolbox.yaml
@@ -0,0 +1,49 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: rook-ceph-tools
        image: rook/ceph:v1.3.5
        command: ["/tini"]
        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
        imagePullPolicy: IfNotPresent
        env:
          - name: ROOK_ADMIN_SECRET
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: admin-secret
        volumeMounts:
          - mountPath: /etc/ceph
            name: ceph-config
          - name: mon-endpoint-volume
            mountPath: /etc/rook
      volumes:
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
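
With the toolbox Deployment running, the Ceph CLI is available inside the pod for inspecting the cluster, the pools, and the RGW components. A usage sketch, looking the pod up by the app=rook-ceph-tools label defined above:

# Open a shell in the toolbox pod
kubectl -n rook-ceph exec -it \
  $(kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}') -- bash

# Inside the toolbox: check overall health, OSD layout, and the pools backing my-store
ceph status
ceph osd tree
ceph df
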
