From 253e14e73aef907d07dd933f66efcb657e147797 Mon Sep 17 00:00:00 2001
From: "david.truong.hes" <david.truong@master.hes-so.ch>
Date: Fri, 4 Apr 2025 17:39:33 +0200
Subject: [PATCH] rook-ceph/configs: Add configuration files for Ceph

---
 rook-ceph/configs/cluster/cluster.yaml        |  57 ++
 .../dashboard-external-https.yaml             |  21 +
 rook-ceph/configs/helm_rook/values.yaml       | 667 ++++++++++++++++++
 .../configs/storage_class/storageclass.yaml   |   0
 4 files changed, 745 insertions(+)
 create mode 100644 rook-ceph/configs/cluster/cluster.yaml
 create mode 100644 rook-ceph/configs/dashboard_service/dashboard-external-https.yaml
 create mode 100644 rook-ceph/configs/helm_rook/values.yaml
 create mode 100644 rook-ceph/configs/storage_class/storageclass.yaml

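Note (not part of the diff): these files are normally applied in order. The operator chart is installed first, e.g. `helm repo add rook-release https://charts.rook.io/release` followed by `helm install --create-namespace --namespace rook-ceph rook-ceph rook-release/rook-ceph -f helm_rook/values.yaml`, then `kubectl apply -f cluster/cluster.yaml` once the operator pod is running, and finally the dashboard Service and the StorageClass. The chart name and repository URL here are the upstream defaults, not values taken from this commit.
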
diff --git a/rook-ceph/configs/cluster/cluster.yaml b/rook-ceph/configs/cluster/cluster.yaml
new file mode 100644
index 0000000..f713beb
--- /dev/null
+++ b/rook-ceph/configs/cluster/cluster.yaml
@@ -0,0 +1,57 @@
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  cephVersion:
+    image: quay.io/ceph/ceph:v18.2.2
+    allowUnsupported: false
+  dataDirHostPath: /var/lib/rook
+
+  dashboard:
+    enabled: true
+    ssl: true
+
+  mon:
+    count: 3
+    allowMultiplePerNode: false
+
+  mgr:
+    count: 1
+
+  network:
+    hostNetwork: false
+
+  resources:
+    mgr:
+      limits:
+        memory: 512Mi
+      requests:
+        cpu: 100m
+        memory: 256Mi
+    mon:
+      limits:
+        memory: 512Mi
+      requests:
+        cpu: 100m
+        memory: 256Mi
+    osd:
+      limits:
+        memory: 1Gi
+      requests:
+        cpu: 250m
+        memory: 512Mi
+    prepareosd:
+      limits:
+        memory: 256Mi
+      requests:
+        cpu: 50m
+        memory: 128Mi
+
+  storage:
+    useAllNodes: true
+    useAllDevices: true
+    config:
+      osdsPerDevice: "1"
+
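Note on the cluster spec above: with a mon count of 3 and allowMultiplePerNode false, it needs at least three schedulable nodes, and useAllNodes/useAllDevices will claim every empty disk on those nodes. The spec creates no pool, so block storage consumers typically add a CephBlockPool before defining a StorageClass. A minimal sketch in the style of the upstream examples (the pool name `replicapool` is an assumption, not part of this commit):

    apiVersion: ceph.rook.io/v1
    kind: CephBlockPool
    metadata:
      name: replicapool
      namespace: rook-ceph
    spec:
      # Spread replicas across hosts, which suits a 3-node cluster
      failureDomain: host
      replicated:
        # Three copies of each object, matching the 3-mon footprint
        size: 3
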
diff --git a/rook-ceph/configs/dashboard_service/dashboard-external-https.yaml b/rook-ceph/configs/dashboard_service/dashboard-external-https.yaml
new file mode 100644
index 0000000..0c1167c
--- /dev/null
+++ b/rook-ceph/configs/dashboard_service/dashboard-external-https.yaml
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: rook-ceph-mgr-dashboard-external-https
+  namespace: rook-ceph
+  labels:
+    app: rook-ceph-mgr
+    rook_cluster: rook-ceph
+spec:
+  ports:
+  - name: dashboard
+    port: 8443
+    protocol: TCP
+    targetPort: 8443
+  selector:
+    app: rook-ceph-mgr
+    rook_cluster: rook-ceph
+    mgr_role: active
+  sessionAffinity: None
+  type: NodePort
+
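Note on the Service above: type NodePort exposes the dashboard's HTTPS port 8443 on a node port chosen by Kubernetes; look it up with `kubectl -n rook-ceph get svc rook-ceph-mgr-dashboard-external-https`. Per the upstream docs, the generated admin password can be read with `kubectl -n rook-ceph get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode`.
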
diff --git a/rook-ceph/configs/helm_rook/values.yaml b/rook-ceph/configs/helm_rook/values.yaml
new file mode 100644
index 0000000..153d2b9
--- /dev/null
+++ b/rook-ceph/configs/helm_rook/values.yaml
@@ -0,0 +1,667 @@
+# Default values for rook-ceph-operator
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+  # -- Image
+  repository: docker.io/rook/ceph
+  # -- Image tag
+  # @default -- `master`
+  tag: v1.16.6
+  # -- Image pull policy
+  pullPolicy: IfNotPresent
+
+crds:
+  # -- Whether the helm chart should create and update the CRDs. If false, the CRDs must be
+  # managed independently with deploy/examples/crds.yaml.
+  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
+  # If the CRDs are deleted in this case, see
+  # [the disaster recovery guide](https://rook.io/docs/rook/latest/Troubleshooting/disaster-recovery/#restoring-crds-after-deletion)
+  # to restore them.
+  enabled: true
+
+# -- Pod resource requests & limits
+resources:
+  limits:
+    memory: 256Mi
+  requests:
+    cpu: 100m
+    memory: 64Mi
+
+# -- Kubernetes [`nodeSelector`](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector) to add to the Deployment.
+nodeSelector: {}
+# Constrain the rook-ceph-operator Deployment to nodes with label `disktype: ssd`.
+# For more info, see https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+#  disktype: ssd
+
+# -- List of Kubernetes [`tolerations`](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/) to add to the Deployment.
+tolerations: []
+
+# -- Delay to use for the `node.kubernetes.io/unreachable` pod failure toleration to override
+# the Kubernetes default of 5 minutes
+unreachableNodeTolerationSeconds: 5
+
+# -- Whether the operator should watch the cluster CRD in its own namespace or not
+currentNamespaceOnly: false
+
+# -- Custom pod labels for the operator
+operatorPodLabels: {}
+
+# -- Pod annotations
+annotations: {}
+
+# -- Global log level for the operator.
+# Options: `ERROR`, `WARNING`, `INFO`, `DEBUG`
+logLevel: INFO
+
+# -- If true, create & use RBAC resources
+rbacEnable: true
+
+rbacAggregate:
+  # -- If true, create a ClusterRole aggregated to [user facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) for objectbucketclaims
+  enableOBCs: false
+
+# -- If true, create & use PSP resources
+pspEnable: false
+
+# -- Set the priority class for the rook operator deployment if desired
+priorityClassName:
+
+# -- Set the container security context for the operator
+containerSecurityContext:
+  runAsNonRoot: true
+  runAsUser: 2016
+  runAsGroup: 2016
+  capabilities:
+    drop: ["ALL"]
+# -- If true, loop devices are allowed to be used for osds in test clusters
+allowLoopDevices: false
+
+# Settings for whether to disable the drivers or other daemons if they are not
+# needed
+csi:
+  # -- Enable Ceph CSI RBD driver
+  enableRbdDriver: true
+  # -- Enable Ceph CSI CephFS driver
+  enableCephfsDriver: true
+  # -- Disable the CSI driver.
+  disableCsiDriver: "false"
+
+  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
+  # in some network configurations where the SDN does not provide access to an external cluster or
+  # there is a significant drop in read/write performance
+  enableCSIHostNetwork: true
+  # -- Enable Snapshotter in CephFS provisioner pod
+  enableCephfsSnapshotter: true
+  # -- Enable Snapshotter in NFS provisioner pod
+  enableNFSSnapshotter: true
+  # -- Enable Snapshotter in RBD provisioner pod
+  enableRBDSnapshotter: true
+  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
+  enablePluginSelinuxHostMount: false
+  # -- Enable Ceph CSI PVC encryption support
+  enableCSIEncryption: false
+
+  # -- Enable volume group snapshot feature. This feature is
+  # enabled by default as long as the necessary CRDs are available in the cluster.
+  enableVolumeGroupSnapshot: true
+  # -- PriorityClassName to be set on csi driver plugin pods
+  pluginPriorityClassName: system-node-critical
+
+  # -- PriorityClassName to be set on csi driver provisioner pods
+  provisionerPriorityClassName: system-cluster-critical
+
+  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
+  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  rbdFSGroupPolicy: "File"
+
+  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
+  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  cephFSFSGroupPolicy: "File"
+
+  # -- Policy for modifying a volume's ownership or permissions when the NFS PVC is being mounted.
+  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
+  nfsFSGroupPolicy: "File"
+
+  # -- OMAP generator generates the omap mapping between the PV name and the RBD image
+  # which helps CSI to identify the rbd images for CSI operations.
+  # `CSI_ENABLE_OMAP_GENERATOR` needs to be enabled when we are using rbd mirroring feature.
+  # By default the OMAP generator is disabled; when enabled, it is deployed as a
+  # sidecar with the CSI provisioner pod. To enable it, set this to true.
+  enableOMAPGenerator: false
+
+  # -- Set CephFS Kernel mount options to use https://docs.ceph.com/en/latest/man/8/mount.ceph/#options.
+  # Set to "ms_mode=secure" when connections.encrypted is enabled in CephCluster CR
+  cephFSKernelMountOptions:
+
+  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
+  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
+  # Hence `enableMetadata` is false by default
+  enableMetadata: false
+
+  # -- Set replicas for csi provisioner deployment
+  provisionerReplicas: 2
+
+  # -- Cluster name identifier to set as metadata on the CephFS subvolume and RBD images. This is useful
+  # when, for example, two container orchestrator clusters (Kubernetes/OCP) are using a single ceph cluster
+  clusterName:
+
+  # -- Set the logging level for the cephCSI containers maintained by cephCSI.
+  # Supported values from 0 to 5. 0 for general useful logs, 5 for trace level verbosity.
+  logLevel: 0
+
+  # -- Set logging level for Kubernetes-csi sidecar containers.
+  # Supported values from 0 to 5. 0 for general useful logs (the default), 5 for trace level verbosity.
+  # @default -- `0`
+  sidecarLogLevel:
+
+  # -- CSI driver name prefix for cephfs, rbd and nfs.
+  # @default -- `namespace name where rook-ceph operator is deployed`
+  csiDriverNamePrefix:
+
+  # -- CSI RBD plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  rbdPluginUpdateStrategy:
+
+  # -- A maxUnavailable parameter of CSI RBD plugin daemonset update strategy.
+  # @default -- `1`
+  rbdPluginUpdateStrategyMaxUnavailable:
+
+  # -- CSI CephFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  cephFSPluginUpdateStrategy:
+
+  # -- A maxUnavailable parameter of CSI cephFS plugin daemonset update strategy.
+  # @default -- `1`
+  cephFSPluginUpdateStrategyMaxUnavailable:
+
+  # -- CSI NFS plugin daemonset update strategy, supported values are OnDelete and RollingUpdate
+  # @default -- `RollingUpdate`
+  nfsPluginUpdateStrategy:
+
+  # -- Set GRPC timeout for csi containers (in seconds). It should be >= 120. If this value is not set or is invalid, it defaults to 150
+  grpcTimeoutInSeconds: 150
+
+  # -- Burst to use while communicating with the kubernetes apiserver.
+  kubeApiBurst:
+
+  # -- QPS to use while communicating with the kubernetes apiserver.
+  kubeApiQPS:
+
+  # -- Additional volumes for the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- Additional volume mounts for the CephCSI RBD plugin DaemonSet
+  csiRBDPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- Additional volumes for the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolume:
+  #  - name: lib-modules
+  #    hostPath:
+  #      path: /run/booted-system/kernel-modules/lib/modules/
+  #  - name: host-nix
+  #    hostPath:
+  #      path: /nix
+
+  # -- Additional volume mounts for the CephCSI CephFS plugin DaemonSet
+  csiCephFSPluginVolumeMount:
+  #  - name: host-nix
+  #    mountPath: /nix
+  #    readOnly: true
+
+  # -- CEPH CSI RBD provisioner resource requirement list
+  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
+  # @default -- see values.yaml
+  csiRBDProvisionerResource: |
+    - name : csi-provisioner
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-resizer
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-snapshotter
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-rbdplugin
+      resource:
+        requests:
+          memory: 512Mi
+        limits:
+          memory: 1Gi
+    - name : csi-omap-generator
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+
+  # -- CEPH CSI RBD plugin resource requirement list
+  # @default -- see values.yaml
+  csiRBDPluginResource: |
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+    - name : csi-rbdplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+
+  # -- CEPH CSI CephFS provisioner resource requirement list
+  # @default -- see values.yaml
+  csiCephFSProvisionerResource: |
+    - name : csi-provisioner
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-resizer
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-snapshotter
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-cephfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+
+  # -- CEPH CSI CephFS plugin resource requirement list
+  # @default -- see values.yaml
+  csiCephFSPluginResource: |
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+    - name : csi-cephfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+    - name : liveness-prometheus
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+
+  # -- CEPH CSI NFS provisioner resource requirement list
+  # @default -- see values.yaml
+  csiNFSProvisionerResource: |
+    - name : csi-provisioner
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 100m
+        limits:
+          memory: 256Mi
+    - name : csi-nfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+    - name : csi-attacher
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+
+  # -- CEPH CSI NFS plugin resource requirement list
+  # @default -- see values.yaml
+  csiNFSPluginResource: |
+    - name : driver-registrar
+      resource:
+        requests:
+          memory: 128Mi
+          cpu: 50m
+        limits:
+          memory: 256Mi
+    - name : csi-nfsplugin
+      resource:
+        requests:
+          memory: 512Mi
+          cpu: 250m
+        limits:
+          memory: 1Gi
+
+  # Set provisionerTolerations and provisionerNodeAffinity for provisioner pod.
+  # The CSI provisioner is best started on the same nodes as the other ceph daemons.
+
+  # -- Array of tolerations in YAML format which will be added to CSI provisioner deployment
+  provisionerTolerations:
+  #    - key: key
+  #      operator: Exists
+  #      effect: NoSchedule
+
+  # -- The node labels for affinity of the CSI provisioner deployment [^1]
+  provisionerNodeAffinity: #key1=value1,value2; key2=value3
+  # Set pluginTolerations and pluginNodeAffinity for plugin daemonset pods.
+  # The CSI plugins need to be started on all the nodes where the clients need to mount the storage.
+
+  # -- Array of tolerations in YAML format which will be added to CephCSI plugin DaemonSet
+  pluginTolerations:
+  #    - key: key
+  #      operator: Exists
+  #      effect: NoSchedule
+
+  # -- The node labels for affinity of the CephCSI RBD plugin DaemonSet [^1]
+  pluginNodeAffinity: # key1=value1,value2; key2=value3
+
+  # -- Enable Ceph CSI Liveness sidecar deployment
+  enableLiveness: false
+
+  # -- CSI CephFS driver metrics port
+  # @default -- `9081`
+  cephfsLivenessMetricsPort:
+
+  # -- CSI Addons server port
+  # @default -- `9070`
+  csiAddonsPort:
+  # -- CSI Addons server port for the RBD provisioner
+  # @default -- `9070`
+  csiAddonsRBDProvisionerPort:
+  # -- CSI Addons server port for the Ceph FS provisioner
+  # @default -- `9070`
+  csiAddonsCephFSProvisionerPort:
+
+  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
+  # you may want to disable this setting. However, this will cause an issue during upgrades
+  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
+  forceCephFSKernelClient: true
+
+  # -- Ceph CSI RBD driver metrics port
+  # @default -- `8080`
+  rbdLivenessMetricsPort:
+
+  serviceMonitor:
+    # -- Enable ServiceMonitor for Ceph CSI drivers
+    enabled: false
+    # -- Service monitor scrape interval
+    interval: 10s
+    # -- ServiceMonitor additional labels
+    labels: {}
+    # -- Use a different namespace for the ServiceMonitor
+    namespace:
+
+  # -- Kubelet root directory path (if the Kubelet uses a different path for the `--root-dir` flag)
+  # @default -- `/var/lib/kubelet`
+  kubeletDirPath:
+
+  # -- Duration in seconds that non-leader candidates will wait to force acquire leadership.
+  # @default -- `137s`
+  csiLeaderElectionLeaseDuration:
+
+  # -- Deadline in seconds that the acting leader will retry refreshing leadership before giving up.
+  # @default -- `107s`
+  csiLeaderElectionRenewDeadline:
+
+  # -- Retry period in seconds the LeaderElector clients should wait between tries of actions.
+  # @default -- `26s`
+  csiLeaderElectionRetryPeriod:
+
+  cephcsi:
+    # -- Ceph CSI image repository
+    repository: quay.io/cephcsi/cephcsi
+    # -- Ceph CSI image tag
+    tag: v3.13.1
+
+  registrar:
+    # -- Kubernetes CSI registrar image repository
+    repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+    # -- Registrar image tag
+    tag: v2.13.0
+
+  provisioner:
+    # -- Kubernetes CSI provisioner image repository
+    repository: registry.k8s.io/sig-storage/csi-provisioner
+    # -- Provisioner image tag
+    tag: v5.1.0
+
+  snapshotter:
+    # -- Kubernetes CSI snapshotter image repository
+    repository: registry.k8s.io/sig-storage/csi-snapshotter
+    # -- Snapshotter image tag
+    tag: v8.2.0
+
+  attacher:
+    # -- Kubernetes CSI Attacher image repository
+    repository: registry.k8s.io/sig-storage/csi-attacher
+    # -- Attacher image tag
+    tag: v4.8.0
+
+  resizer:
+    # -- Kubernetes CSI resizer image repository
+    repository: registry.k8s.io/sig-storage/csi-resizer
+    # -- Resizer image tag
+    tag: v1.13.1
+
+  # -- Image pull policy
+  imagePullPolicy: IfNotPresent
+
+  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
+  cephfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI NFS Deployments and DaemonSets Pods
+  nfsPodLabels: #"key1=value1,key2=value2"
+
+  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
+  rbdPodLabels: #"key1=value1,key2=value2"
+
+  csiAddons:
+    # -- Enable CSIAddons
+    enabled: false
+    # -- CSIAddons sidecar image repository
+    repository: quay.io/csiaddons/k8s-sidecar
+    # -- CSIAddons sidecar image tag
+    tag: v0.11.0
+
+  nfs:
+    # -- Enable the nfs csi driver
+    enabled: false
+
+  topology:
+    # -- Enable topology based provisioning
+    enabled: false
+    # NOTE: the value here serves as an example and needs to be
+    # updated with node labels that define domains of interest
+    # -- domainLabels define which node labels to use as domains
+    # for CSI nodeplugins to advertise their domains
+    domainLabels:
+    # - kubernetes.io/hostname
+    # - topology.kubernetes.io/zone
+    # - topology.rook.io/rack
+
+  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  cephFSAttachRequired: true
+  # -- Whether to skip any attach operation altogether for RBD PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If set to false it skips the volume attachments and makes the creation of pods using the RBD PVC fast.
+  # **WARNING** It's highly discouraged to use this for RWO volumes as it can cause data corruption.
+  # csi-addons operations like Reclaimspace and PVC Keyrotation will also not be supported if set
+  # to false since we'll have no VolumeAttachments to determine which node the PVC is mounted on.
+  # Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  rbdAttachRequired: true
+  # -- Whether to skip any attach operation altogether for NFS PVCs. See more details
+  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
+  # If nfsAttachRequired is set to false it skips the volume attachments and makes the creation
+  # of pods using the NFS PVC fast. **WARNING** It's highly discouraged to use this for
+  # NFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
+  nfsAttachRequired: true
+
+# -- Enable discovery daemon
+enableDiscoveryDaemon: true
+# -- Set the discovery daemon device discovery interval (defaults to 60m)
+discoveryDaemonInterval: 60m
+
+# -- The timeout for ceph commands in seconds
+cephCommandsTimeoutSeconds: "15"
+
+# -- If true, run rook operator on the host network
+useOperatorHostNetwork: true
+
+# -- If true, scale down the rook operator.
+# This is useful for administrative actions where the rook operator must be scaled down, while using gitops style tooling
+# to deploy your helm charts.
+scaleDownOperator: false
+
+## Rook Discover configuration
+## toleration: NoSchedule, PreferNoSchedule or NoExecute
+## tolerationKey: Set this to the specific key of the taint to tolerate
+## tolerations: Array of tolerations in YAML format which will be added to agent deployment
+## nodeAffinity: Set to labels of the node to match
+
+discover:
+  # -- Toleration for the discover pods.
+  # Options: `NoSchedule`, `PreferNoSchedule` or `NoExecute`
+  toleration:
+  # -- The specific key of the taint to tolerate
+  tolerationKey:
+  # -- Array of tolerations in YAML format which will be added to discover deployment
+  tolerations:
+  #   - key: key
+  #     operator: Exists
+  #     effect: NoSchedule
+  # -- The node labels for affinity of `discover-agent` [^1]
+  nodeAffinity:
+  #   key1=value1,value2; key2=value3
+  #
+  #   or
+  #
+  #   requiredDuringSchedulingIgnoredDuringExecution:
+  #     nodeSelectorTerms:
+  #       - matchExpressions:
+  #           - key: storage-node
+  #             operator: Exists
+  # -- Labels to add to the discover pods
+  podLabels: # "key1=value1,key2=value2"
+  # -- Add resources to discover daemon pods
+  resources:
+  #   - limits:
+  #       memory: 512Mi
+  #   - requests:
+  #       cpu: 100m
+  #       memory: 128Mi
+
+# -- Custom label to identify node hostname. If not set, `kubernetes.io/hostname` will be used
+customHostnameLabel:
+
+# -- Runs Ceph Pods as privileged to be able to write to `hostPaths` in OpenShift with SELinux restrictions.
+hostpathRequiresPrivileged: false
+
+# -- Whether to run all Rook pods on the host network, for example in environments where a CNI is not enabled
+enforceHostNetwork: false
+
+# -- Disable automatic orchestration when new devices are discovered.
+disableDeviceHotplug: false
+
+# -- The revision history limit for all pods created by Rook. If blank, the K8s default is 10.
+revisionHistoryLimit:
+
+# -- Blacklist certain disks according to the regex provided.
+discoverDaemonUdev:
+
+# -- The imagePullSecrets option allows pulling docker images from a private registry. It will be passed to all service accounts.
+imagePullSecrets:
+# - name: my-registry-secret
+
+# -- Whether the OBC provisioner should watch the operator namespace or not; if not, the namespace of the cluster will be used
+enableOBCWatchOperatorNamespace: true
+
+# -- Specify the prefix for the OBC provisioner in place of the cluster namespace
+# @default -- `ceph cluster namespace`
+obcProvisionerNamePrefix:
+
+monitoring:
+  # -- Enable monitoring. Requires Prometheus to be pre-installed.
+  # Enabling will also create RBAC rules to allow Operator to create ServiceMonitors
+  enabled: false
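Note on the values above: the scheduling knobs (nodeSelector, tolerations, and the csi provisioner/plugin tolerations and affinities) are all left empty, so the operator and CSI pods can schedule on any node. A minimal sketch of pinning the operator, reusing the chart's own `disktype: ssd` example (the taint key `storage-node` is hypothetical):

    # values.yaml fragment; merged over the chart defaults at install time
    nodeSelector:
      disktype: ssd
    tolerations:
      - key: storage-node
        operator: Exists
        effect: NoSchedule
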
diff --git a/rook-ceph/configs/storage_class/storageclass.yaml b/rook-ceph/configs/storage_class/storageclass.yaml
new file mode 100644
index 0000000..e69de29
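
Note: storageclass.yaml is committed empty (blob e69de29). For reference only, an RBD-backed StorageClass in the style of the upstream Rook examples is sketched below; it assumes the hypothetical `replicapool` pool mentioned earlier and is not content from this patch:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: rook-ceph-block
    # CSI driver name: <operator namespace>.rbd.csi.ceph.com
    provisioner: rook-ceph.rbd.csi.ceph.com
    parameters:
      # clusterID is the namespace of the CephCluster
      clusterID: rook-ceph
      pool: replicapool
      imageFormat: "2"
      imageFeatures: layering
      csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
      csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
      csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
      csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
      csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
      csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
      csi.storage.k8s.io/fstype: ext4
    reclaimPolicy: Delete
    allowVolumeExpansion: true
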
-- 
GitLab