feat: add rook-ceph on the spare disks (VM only)

This commit is contained in:
2026-02-24 14:47:26 +01:00
parent 718b49f971
commit b1d5500d77
11 changed files with 282 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# All rendered resources (and the Namespace placeholder below) land here.
namespace: rook-ceph
# TODO: Enable alerts later
# components:
#   - ../../components/alerts
resources:
  - ./namespace.yaml
  - ./rook-ceph

View File

@@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
  # "_" is presumably a placeholder: kustomize's namespace transformer in the
  # parent kustomization (namespace: rook-ceph) renames Namespace objects to
  # match — NOTE(review): confirm this renders as "rook-ceph" with
  # `kustomize build`.
  name: _
  annotations:
    # Keep the namespace (and the Ceph data in it) if Flux prunes this
    # Kustomization.
    kustomize.toolkit.fluxcd.io/prune: disabled

View File

@@ -0,0 +1,49 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: rook-ceph
spec:
  interval: 1h
  # Deploys the rook-ceph operator (app/ directory).
  path: ./kubernetes/apps/rook-ceph/rook-ceph/app
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
    namespace: flux-system
  targetNamespace: rook-ceph
  # Block until the operator objects are Ready; the cluster Kustomization
  # below depends on this one.
  wait: true
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: rook-ceph-cluster
spec:
  dependsOn:
    # The operator must be up before the CephCluster CRs can reconcile.
    - name: rook-ceph
    # Cross-namespace dependency reference.
    - name: volsync
      namespace: volsync-system
  healthChecks:
    - apiVersion: helm.toolkit.fluxcd.io/v2
      kind: HelmRelease
      name: rook-ceph-cluster
      namespace: rook-ceph
    - apiVersion: ceph.rook.io/v1
      kind: CephCluster
      namespace: rook-ceph
      name: rook-ceph
  # CEL expressions evaluated against the CephCluster status: treat
  # HEALTH_WARN as acceptable, HEALTH_ERR as failed.
  healthCheckExprs:
    - apiVersion: ceph.rook.io/v1
      kind: CephCluster
      failed: status.ceph.health == 'HEALTH_ERR'
      current: status.ceph.health in ['HEALTH_OK', 'HEALTH_WARN']
  interval: 1h
  path: ./kubernetes/apps/rook-ceph/rook-ceph/cluster
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
    namespace: flux-system
  targetNamespace: rook-ceph

View File

@@ -0,0 +1,10 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./externalsecret.yaml
  # - ./grafanadashboard.yaml
  - ./helmrelease.yaml
  - ./ocirepository.yaml
  - ./secret.sops.yaml

View File

@@ -0,0 +1,14 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1.json
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: rook-ceph
spec:
  interval: 15m
  layerSelector:
    # Pull only the Helm chart layer from the OCI artifact.
    mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip
    operation: copy
  ref:
    # Keep in lockstep with the rook-ceph-cluster chart version.
    tag: v1.19.1
  url: oci://ghcr.io/rook/rook-ceph

View File

@@ -0,0 +1,22 @@
# SOPS-encrypted (age). mac_only_encrypted: true, so comments and plain keys
# are not covered by the MAC and may be edited; the ENC[...] payloads and the
# sops metadata below must stay byte-identical.
apiVersion: v1
kind: Secret
metadata:
  name: rook-ceph-dashboard-password
stringData:
  # NOTE(review): key is "api-token" but Rook's generated dashboard secret
  # uses the "password" key — confirm which consumer reads this secret.
  api-token: ENC[AES256_GCM,data:Q/EO1flnXjhh/GuaFMufV4T6a6X6+slo1g==,iv:YsQmkJ6VRkmAWya6Fmlt6YUW/yX3DTqZOS6Z2c8+WwA=,tag:hAUOIr8hDFRRHYeXyxvhpg==,type:str]
sops:
  age:
    - recipient: age1yzrqhl9dk8ljswpmzsqme3enad5kxxhsptdvecy3lwlq0ms80gaqxrctst
      enc: |
        -----BEGIN AGE ENCRYPTED FILE-----
        YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBncEg3QlNCdXJvMlFvUVgx
        RU9jU2E1K3h5dlphWmN4R3VhdXBYaDhybFZFCjJuRjFoZ25RQU53RDhpeElTb1Ba
        RXVYdWFFVFlZT0JmOXRRc3JlWk9zdmcKLS0tIDhFSkJJcytTR1JIZlBIT2ZNZGJ6
        YWxtMWJrd3hUQlQ3dG04TlRWdy9VbzQKNcokkZu9wDTKM17sLcJ7OkafSI1nFhyO
        /IM1vRlkJh12vPFE4351skFkgDdExf4gRoZH9MzXdDSh5b/2YBl8Ig==
        -----END AGE ENCRYPTED FILE-----
  lastmodified: "2026-02-24T13:42:07Z"
  mac: ENC[AES256_GCM,data:l5WfPr1HQ94V+TbgLFavTF569qO/9hcgqh7XP3NRZH/Z8/xfL496Cint2DwNkE6RB1JPAM4CpsOeCF3HItOgvonokIgZswyCeKwdU5nrWH9UO9pkAIsVjVLRNSbXJhsZiRJQmdQ2SescDSs/5S3wo+x8EO8PPj41TbZBvzUolcw=,iv:3QsirCiB81SVZ+yNAMr1IdWAbtHywPC8E444y+UEem8=,tag:u6uk/YdzQ2Svb3Tbbx3TGw==,type:str]
  encrypted_regex: ^(data|stringData)$
  mac_only_encrypted: true
  version: 3.11.0

View File

@@ -0,0 +1,140 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json
apiVersion: helm.toolkit.fluxcd.io/v2
kind: HelmRelease
metadata:
  name: rook-ceph-cluster
spec:
  chartRef:
    kind: OCIRepository
    name: rook-ceph-cluster
  interval: 1h
  values:
    cephClusterSpec:
      cleanupPolicy:
        # Spare disks may carry leftovers from previous (VM) clusters.
        wipeDevicesFromOtherClusters: true
      crashCollector:
        disable: false
      csi:
        readAffinity:
          enabled: false # https://github.com/ceph/ceph-csi/issues/5772
      dashboard:
        enabled: true
        urlPrefix: /
        # TLS is terminated at the gateway (see route below).
        ssl: false
        prometheusEndpoint: http://prometheus-operated.observability.svc.cluster.local:9090
      mgr:
        modules:
          - name: pg_autoscaler
            enabled: true
      network:
        connections:
          requireMsgr2: true
      resources:
        osd:
          limits:
            memory: 8Gi # Required for bootstrap
      storage:
        # Quoted: the by-path name contains many ':' characters — keep it an
        # unambiguous string. Matches the single spare virtual disk (VM only).
        devicePathFilter: "/dev/disk/by-path/pci-0000:00:10.0-scsi-0:0:6:0"
        useAllDevices: false
        useAllNodes: true
    monitoring:
      enabled: true
      createPrometheusRules: true
    toolbox:
      enabled: true
    route:
      dashboard:
        host:
          name: rook.laurivan.com
          path: /
          pathType: PathPrefix
        parentRefs:
          - name: envoy-internal
            namespace: network
    cephBlockPools:
      - name: ceph-blockpool
        spec:
          failureDomain: host
          replicated:
            size: 3
        storageClass:
          enabled: true
          name: ceph-block
          allowVolumeExpansion: true
          isDefault: true
          mountOptions:
            - discard
          parameters:
            compression_algorithm: zstd
            compression_mode: passive
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/controller-publish-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/controller-publish-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
            csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
            imageFeatures: deep-flatten,exclusive-lock,fast-diff,layering,object-map
            imageFormat: "2"
          reclaimPolicy: Delete
          volumeBindingMode: Immediate
    cephBlockPoolsVolumeSnapshotClass:
      enabled: true
      name: csi-ceph-blockpool
      deletionPolicy: Delete
      isDefault: true
    cephFileSystems:
      - name: ceph-filesystem
        spec:
          metadataPool:
            replicated:
              size: 3
          dataPools:
            - name: data0
              failureDomain: host
              replicated:
                size: 3
          metadataServer:
            activeCount: 1
            activeStandby: true
            placement:
              # Spread the MDS pods across nodes.
              topologySpreadConstraints:
                - maxSkew: 1
                  topologyKey: kubernetes.io/hostname
                  whenUnsatisfiable: DoNotSchedule
                  labelSelector:
                    matchLabels:
                      app.kubernetes.io/name: ceph-mds
                      app.kubernetes.io/part-of: ceph-filesystem
            priorityClassName: system-cluster-critical
            resources:
              requests:
                cpu: 100m
                memory: 1Gi
              limits:
                memory: 4Gi
        storageClass:
          enabled: true
          name: ceph-filesystem
          allowVolumeExpansion: true
          isDefault: false
          parameters:
            csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/controller-publish-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/controller-publish-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
            csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}"
            csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
            csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}"
            pool: data0
          reclaimPolicy: Delete
          volumeBindingMode: Immediate
    cephFileSystemVolumeSnapshotClass:
      enabled: true
      name: csi-ceph-filesystem
      deletionPolicy: Delete
      isDefault: false
    cephObjectStores: []

View File

@@ -0,0 +1,7 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./helmrelease.yaml
  - ./ocirepository.yaml

View File

@@ -0,0 +1,14 @@
---
# yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1.json
apiVersion: source.toolkit.fluxcd.io/v1
kind: OCIRepository
metadata:
  name: rook-ceph-cluster
spec:
  interval: 15m
  layerSelector:
    # Pull only the Helm chart layer from the OCI artifact.
    mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip
    operation: copy
  ref:
    # Keep in lockstep with the rook-ceph operator chart version.
    tag: v1.19.1
  url: oci://ghcr.io/rook/rook-ceph-cluster

View File

@@ -0,0 +1,6 @@
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./app.ks.yaml