From 3f61212aef1125d261ad8c52cb1e54d3a8e0ffd2 Mon Sep 17 00:00:00 2001 From: Justin Riley Date: Mon, 18 Nov 2024 16:35:21 -0500 Subject: [PATCH 1/6] ocp-test: add csi-wekafsplugin deployment --- .../csi-wekafsplugin/kustomization.yaml | 5 + .../secretstores/kustomization.yaml | 1 + .../csi-wekafsplugin-node/daemonset.yaml | 205 ++++++++++++++ .../csi-wekafsplugin-node/kustomization.yaml | 4 + .../deployment.yaml | 258 ++++++++++++++++++ .../kustomization.yaml | 4 + .../csi-wekafsplugin/kustomization.yaml | 5 + .../csi-wekafsplugin/namespace.yaml | 5 + .../kustomization.yaml | 4 + .../serviceaccount.yaml | 10 + .../csi-wekafsplugin-node/kustomization.yaml | 4 + .../csi-wekafsplugin-node/serviceaccount.yaml | 10 + csi-wekafsplugin/base/kustomization.yaml | 20 ++ .../kustomization.yaml | 4 + .../machineconfig.yaml | 35 +++ .../clusterrolebinding.yaml | 17 ++ .../kustomization.yaml | 4 + .../clusterrolebinding.yaml | 17 ++ .../csi-wekafsplugin-node/kustomization.yaml | 4 + .../clusterrole.yaml | 64 +++++ .../kustomization.yaml | 4 + .../csi-wekafsplugin-node/clusterrole.yaml | 34 +++ .../csi-wekafsplugin-node/kustomization.yaml | 4 + .../kustomization.yaml | 4 + .../rolebinding.yaml | 18 ++ .../kustomization.yaml | 4 + .../csi-wekafsplugin-controller/role.yaml | 13 + .../kustomization.yaml | 4 + .../securitycontextconstraints.yaml | 25 ++ .../kustomization.yaml | 4 + .../securitycontextconstraints.yaml | 25 ++ .../csidrivers/csi.weka.io/csidriver.yaml | 11 + .../csidrivers/csi.weka.io/kustomization.yaml | 4 + .../nerc-ocp-test/csi-wekafs-api-secret.yaml | 22 ++ .../overlays/nerc-ocp-test/kustomization.yaml | 8 + 35 files changed, 864 insertions(+) create mode 100644 cluster-scope/overlays/nerc-ocp-test/secretstores/csi-wekafsplugin/kustomization.yaml create mode 100644 csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/daemonset.yaml create mode 100644 csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/kustomization.yaml create mode 100644 
csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/deployment.yaml create mode 100644 csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/kustomization.yaml create mode 100644 csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/kustomization.yaml create mode 100644 csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/namespace.yaml create mode 100644 csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/kustomization.yaml create mode 100644 csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/serviceaccount.yaml create mode 100644 csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/kustomization.yaml create mode 100644 csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/serviceaccount.yaml create mode 100644 csi-wekafsplugin/base/kustomization.yaml create mode 100644 csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/kustomization.yaml create mode 100644 csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/machineconfig.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/clusterrolebinding.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/kustomization.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/clusterrolebinding.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/kustomization.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/clusterrole.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/kustomization.yaml create mode 100644 
csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/clusterrole.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/kustomization.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/kustomization.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/rolebinding.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/kustomization.yaml create mode 100644 csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/role.yaml create mode 100644 csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/kustomization.yaml create mode 100644 csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml create mode 100644 csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/kustomization.yaml create mode 100644 csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml create mode 100644 csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/csidriver.yaml create mode 100644 csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/kustomization.yaml create mode 100644 csi-wekafsplugin/overlays/nerc-ocp-test/csi-wekafs-api-secret.yaml create mode 100644 csi-wekafsplugin/overlays/nerc-ocp-test/kustomization.yaml diff --git a/cluster-scope/overlays/nerc-ocp-test/secretstores/csi-wekafsplugin/kustomization.yaml b/cluster-scope/overlays/nerc-ocp-test/secretstores/csi-wekafsplugin/kustomization.yaml new file mode 100644 index 00000000..3f9da541 --- /dev/null +++ b/cluster-scope/overlays/nerc-ocp-test/secretstores/csi-wekafsplugin/kustomization.yaml @@ -0,0 +1,5 @@ 
+apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: csi-wekafsplugin +components: + - ../../../../components/nerc-secret-store diff --git a/cluster-scope/overlays/nerc-ocp-test/secretstores/kustomization.yaml b/cluster-scope/overlays/nerc-ocp-test/secretstores/kustomization.yaml index 4790303a..8c7f453e 100644 --- a/cluster-scope/overlays/nerc-ocp-test/secretstores/kustomization.yaml +++ b/cluster-scope/overlays/nerc-ocp-test/secretstores/kustomization.yaml @@ -9,3 +9,4 @@ resources: - dex - minio - danni-ilab +- csi-wekafsplugin diff --git a/csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/daemonset.yaml b/csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/daemonset.yaml new file mode 100644 index 00000000..b9261a69 --- /dev/null +++ b/csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/daemonset.yaml @@ -0,0 +1,205 @@ +# Source: csi-wekafsplugin/templates/nodeserver-daemonset.yaml +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-wekafsplugin-node + namespace: csi-wekafsplugin +spec: + selector: + matchLabels: + app: csi-wekafsplugin-node + template: + metadata: + labels: + app: csi-wekafsplugin-node + component: csi-wekafsplugin-node + release: csi-wekafsplugin + annotations: + prometheus.io/scrape: 'true' + prometheus.io/path: '/metrics' + prometheus.io/port: '9094' + spec: + serviceAccountName: csi-wekafsplugin-node + hostNetwork: true + initContainers: + - name: init + volumeMounts: + - mountPath: /etc/nodeinfo + name: nodeinfo + image: "quay.io/weka.io/csi-wekafs:v2.5.1" + imagePullPolicy: IfNotPresent + securityContext: + # This doesn't need to run as root. 
+ runAsUser: 9376 + runAsGroup: 9376 + env: + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + command: + - bash + args: + - -c + - kubectl label node $NODENAME "topology.csi.weka.io/transport-" ; kubectl get node $NODENAME -o json | jq '.metadata' > /etc/nodeinfo/metadata + containers: + - name: wekafs + securityContext: + privileged: true + image: quay.io/weka.io/csi-wekafs:v2.5.1 + imagePullPolicy: Always + args: + - "--v=5" + - "--drivername=$(CSI_DRIVER_NAME)" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--dynamic-path=$(CSI_DYNAMIC_PATH)" + - "--csimode=$(X_CSI_MODE)" + - "--newvolumeprefix=csivol-" + - "--newsnapshotprefix=csisnp-" + - "--seedsnapshotprefix=csisnp-seed-" + - "--selinux-support" + - "--enablemetrics" + - "--metricsport=9094" + - "--allowinsecurehttps" + - "--mutuallyexclusivemountoptions=readcache,writecache,coherent,forcedirect" + - "--mutuallyexclusivemountoptions=sync,async" + - "--mutuallyexclusivemountoptions=ro,rw" + - "--grpcrequesttimeoutseconds=30" + - "--concurrency.nodePublishVolume=5" + - "--concurrency.nodeUnpublishVolume=5" + - "--allownfsfailback" + - "--nfsprotocolversion=4.1" + ports: + - containerPort: 9899 + name: healthz + protocol: TCP + - containerPort: 9094 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + env: + - name: CSI_DRIVER_NAME + value: csi.weka.io + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CSI_DYNAMIC_PATH + value: csi-volumes + - name: X_CSI_MODE + value: node + - name: KUBE_NODE_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + 
mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /var/lib/csi-wekafs-data + name: csi-data-dir + - mountPath: /dev + name: dev-dir + - mountPath: /etc/nodeinfo + name: nodeinfo + readOnly: true + - mountPath: /etc/selinux/config + name: selinux-config + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: registry.k8s.io/sig-storage/livenessprobe:v2.14.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--health-port=$(HEALTH_PORT)" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + - name: HEALTH_PORT + value: "9899" + - name: csi-registrar + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.12.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(KUBELET_REGISTRATION_PATH)" + - "--timeout=60s" + - "--health-port=9809" + ports: + - containerPort: 9809 + name: healthz + livenessProbe: + httpGet: + port: healthz + path: /healthz + initialDelaySeconds: 5 + timeoutSeconds: 5 + securityContext: + privileged: true + env: + - name: ADDRESS + value: unix:///csi/csi.sock + - name: KUBELET_REGISTRATION_PATH + value: "/var/lib/kubelet/plugins/csi-wekafs-node/csi.sock" + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - mountPath: /var/lib/csi-wekafs-data + name: csi-data-dir + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: plugins-dir + - hostPath: + path: /var/lib/kubelet/plugins/csi-wekafs-node + type: DirectoryOrCreate + name: socket-dir + - hostPath: + # 'path' is where PV data is persisted on host. 
+ # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot + path: /var/lib/csi-wekafs-data/ + type: DirectoryOrCreate + name: csi-data-dir + - hostPath: + path: /dev + type: Directory + name: dev-dir + # if enforced selinux or automatically detected OpenShift Container Platform, pass selinux-config + - hostPath: + path: /etc/selinux/config + type: File + name: selinux-config + - name: nodeinfo + emptyDir: {} diff --git a/csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/kustomization.yaml b/csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/kustomization.yaml new file mode 100644 index 00000000..48f1d3d7 --- /dev/null +++ b/csi-wekafsplugin/base/apps/daemonsets/csi-wekafsplugin-node/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - daemonset.yaml diff --git a/csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/deployment.yaml b/csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/deployment.yaml new file mode 100644 index 00000000..31e9b580 --- /dev/null +++ b/csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/deployment.yaml @@ -0,0 +1,258 @@ +# Source: csi-wekafsplugin/templates/controllerserver-deployment.yaml +kind: Deployment +apiVersion: apps/v1 +metadata: + name: csi-wekafsplugin-controller + namespace: csi-wekafsplugin + labels: + app: csi-wekafsplugin-controller + component: csi-wekafsplugin-controller + release: csi-wekafsplugin +spec: + selector: + matchLabels: + app: csi-wekafsplugin-controller + replicas: 2 + template: + metadata: + labels: + app: csi-wekafsplugin-controller + component: csi-wekafsplugin-controller + release: csi-wekafsplugin + annotations: + prometheus.io/scrape: 'true' + prometheus.io/path: '/metrics' + prometheus.io/port: '9090,9091,9092,9093,9095' + spec: + serviceAccountName: csi-wekafsplugin-controller + hostNetwork: true + containers: + - name: 
wekafs + securityContext: + privileged: true + image: quay.io/weka.io/csi-wekafs:v2.5.1 + imagePullPolicy: Always + args: + - "--drivername=$(CSI_DRIVER_NAME)" + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + - "--dynamic-path=$(CSI_DYNAMIC_PATH)" + - "--csimode=$(X_CSI_MODE)" + - "--newvolumeprefix=csivol-" + - "--newsnapshotprefix=csisnp-" + - "--seedsnapshotprefix=csisnp-seed-" + - "--allowautofscreation" + - "--allowautofsexpansion" + - "--enablemetrics" + - "--metricsport=9090" + - "--allowinsecurehttps" + - "--mutuallyexclusivemountoptions=readcache,writecache,coherent,forcedirect" + - "--mutuallyexclusivemountoptions=sync,async" + - "--mutuallyexclusivemountoptions=ro,rw" + - "--grpcrequesttimeoutseconds=30" + - "--concurrency.createVolume=5" + - "--concurrency.deleteVolume=5" + - "--concurrency.expandVolume=5" + - "--concurrency.createSnapshot=5" + - "--concurrency.deleteSnapshot=5" + - "--allownfsfailback" + - "--nfsprotocolversion=4.1" + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + - containerPort: 9090 + name: metrics + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: CSI_DRIVER_NAME + value: csi.weka.io + - name: CSI_DRIVER_VERSION + value: 2.5.1 + - name: X_CSI_MODE + value: controller + - name: CSI_DYNAMIC_PATH + value: csi-volumes + - name: X_CSI_DEBUG + value: "false" + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: KUBE_NODE_IP_ADDRESS + valueFrom: + fieldRef: + fieldPath: status.hostIP + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /var/lib/csi-wekafs-data + name: 
csi-data-dir + - mountPath: /dev + name: dev-dir + - name: csi-attacher + image: registry.k8s.io/sig-storage/csi-attacher:v4.7.0 + securityContext: + privileged: true + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + - "--timeout=60s" + - "--leader-election" + - "--leader-election-namespace=csi-wekafsplugin" + - "--worker-threads=5" + - "--http-endpoint=:9095" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + livenessProbe: + httpGet: + port: 9095 + path: /healthz/leader-election + ports: + - containerPort: 9095 + name: pr-metrics + protocol: TCP + - name: csi-provisioner + image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--feature-gates=Topology=true" + - "--timeout=60s" + - "--prevent-volume-mode-conversion" + - "--leader-election" + - "--leader-election-namespace=csi-wekafsplugin" + - "--worker-threads=5" + - "--retry-interval-start=10s" + - "--http-endpoint=:9091" + livenessProbe: + httpGet: + port: 9091 + path: /healthz/leader-election + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: "/csi" + ports: + - containerPort: 9091 + name: pr-metrics + protocol: TCP + - name: csi-resizer + image: registry.k8s.io/sig-storage/csi-resizer:v1.12.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--timeout=60s" + - "--http-endpoint=:9092" + - "--leader-election" + - "--leader-election-namespace=csi-wekafsplugin" + - "--workers=5" + - "--retry-interval-start=10s" + livenessProbe: + httpGet: + port: 9092 + path: /healthz/leader-election + env: + - name: ADDRESS + value: unix:///csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + ports: + - containerPort: 9092 + name: rs-metrics + protocol: TCP + - name: csi-snapshotter + image: registry.k8s.io/sig-storage/csi-snapshotter:v8.1.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--timeout=60s" + - 
"--leader-election" + - "--leader-election-namespace=csi-wekafsplugin" + - "--worker-threads=5" + - "--retry-interval-start=10s" + - "--http-endpoint=:9093" + livenessProbe: + httpGet: + port: 9093 + path: /healthz/leader-election + ports: + - containerPort: 9093 + name: sn-metrics + protocol: TCP + env: + - name: ADDRESS + value: unix:///csi/csi.sock + imagePullPolicy: IfNotPresent + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: registry.k8s.io/sig-storage/livenessprobe:v2.14.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + - "--health-port=$(HEALTH_PORT)" + env: + - name: ADDRESS + value: unix:///csi/csi.sock + - name: HEALTH_PORT + value: "9898" + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-wekafs-controller + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: plugins-dir + - hostPath: + # 'path' is where PV data is persisted on host. 
+ # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot + path: /var/lib/csi-wekafs-data/ + type: DirectoryOrCreate + name: csi-data-dir + - hostPath: + path: /dev + type: Directory + name: dev-dir diff --git a/csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/kustomization.yaml b/csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/kustomization.yaml new file mode 100644 index 00000000..9c2d28b0 --- /dev/null +++ b/csi-wekafsplugin/base/apps/deployments/csi-wekafsplugin-controller/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - deployment.yaml diff --git a/csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/kustomization.yaml b/csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/kustomization.yaml new file mode 100644 index 00000000..809cbe53 --- /dev/null +++ b/csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/kustomization.yaml @@ -0,0 +1,5 @@ +--- +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - namespace.yaml diff --git a/csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/namespace.yaml b/csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/namespace.yaml new file mode 100644 index 00000000..98504df3 --- /dev/null +++ b/csi-wekafsplugin/base/core/namespaces/csi-wekafsplugin/namespace.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: csi-wekafsplugin +spec: {} diff --git a/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/kustomization.yaml b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/kustomization.yaml new file mode 100644 index 00000000..cf30275b --- /dev/null +++ b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - serviceaccount.yaml diff --git 
a/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/serviceaccount.yaml b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/serviceaccount.yaml new file mode 100644 index 00000000..15a6bee4 --- /dev/null +++ b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +# Source: csi-wekafsplugin/templates/controllerserver-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-wekafsplugin-controller + namespace: csi-wekafsplugin + labels: + app: csi-wekafsplugin-controller + component: csi-wekafsplugin-controller + release: csi-wekafsplugin diff --git a/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/kustomization.yaml b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/kustomization.yaml new file mode 100644 index 00000000..cf30275b --- /dev/null +++ b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - serviceaccount.yaml diff --git a/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/serviceaccount.yaml b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/serviceaccount.yaml new file mode 100644 index 00000000..5ebf516c --- /dev/null +++ b/csi-wekafsplugin/base/core/serviceaccounts/csi-wekafsplugin-node/serviceaccount.yaml @@ -0,0 +1,10 @@ +# Source: csi-wekafsplugin/templates/nodeserver-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-wekafsplugin-node + namespace: csi-wekafsplugin + labels: + app: csi-wekafsplugin-node + component: csi-wekafsplugin-node + release: csi-wekafsplugin diff --git a/csi-wekafsplugin/base/kustomization.yaml b/csi-wekafsplugin/base/kustomization.yaml new file mode 100644 index 00000000..ad55b59d --- /dev/null +++ b/csi-wekafsplugin/base/kustomization.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: 
kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: csi-wekafsplugin +resources: + - core/namespaces/csi-wekafsplugin + - core/serviceaccounts/csi-wekafsplugin-node + - core/serviceaccounts/csi-wekafsplugin-controller + - storage.k8s.io/csidrivers/csi.weka.io + - security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc + - security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc + - rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node + - rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller + - rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node + - rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller + - rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller + - rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller + - machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker + - apps/daemonsets/csi-wekafsplugin-node + - apps/deployments/csi-wekafsplugin-controller diff --git a/csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/kustomization.yaml b/csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/kustomization.yaml new file mode 100644 index 00000000..c2acbbd3 --- /dev/null +++ b/csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - machineconfig.yaml diff --git a/csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/machineconfig.yaml b/csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/machineconfig.yaml new file mode 100644 index 00000000..857b48cd --- /dev/null +++ 
b/csi-wekafsplugin/base/machineconfiguration.openshift.io/machineconfigs/50-csi-wekafs-selinux-policy-worker/machineconfig.yaml @@ -0,0 +1,35 @@ +# Source: csi-wekafsplugin/templates/selinux-policy-machineconfig.yaml +kind: MachineConfig +apiVersion: machineconfiguration.openshift.io/v1 +metadata: + name: 50-csi-wekafs-selinux-policy-worker + labels: + machineconfiguration.openshift.io/role: worker +spec: + osImageURL: '' + config: + ignition: + version: 3.2.0 + storage: + files: + - filesystem: root + path: "/etc/selinux/csi-wekafs-selinux.cil" + contents: + source: data:text/plain;charset=utf-8;base64,KHR5cGVhbGlhcyB3ZWthZnNfY3NpX3ZvbHVtZV90KQoodHlwZWFsaWFzYWN0dWFsIHdla2Fmc19jc2lfdm9sdW1lX3Qgd2VrYWZzX3QpCih0eXBlYWxpYXMgd2VrYWZzX2ZpbGVzeXN0ZW1fdCkKKHR5cGVhbGlhc2FjdHVhbCB3ZWthZnNfZmlsZXN5c3RlbV90IHdla2Fmc190KQoodHlwZSB3ZWthZnNfdCkKKHJvbGV0eXBlIG9iamVjdF9yIHdla2Fmc190KQoodHlwZWF0dHJpYnV0ZXNldCBjaWxfZ2VuX3JlcXVpcmUgdW5sYWJlbGVkX3QpCih0eXBlYXR0cmlidXRlc2V0IGNpbF9nZW5fcmVxdWlyZSBjb250YWluZXJfdmFyX2xpYl90KQoodHlwZWF0dHJpYnV0ZXNldCBjaWxfZ2VuX3JlcXVpcmUgY29udGFpbmVyX3QpCih0eXBlYXR0cmlidXRlc2V0IGNpbF9nZW5fcmVxdWlyZSBzcG9vbGZpbGUpCih0eXBlYXR0cmlidXRlc2V0IHNwb29sZmlsZSAod2VrYWZzX3QgKSkKKHR5cGVhdHRyaWJ1dGVzZXQgY2lsX2dlbl9yZXF1aXJlIGZpbGVfdHlwZSkKKHR5cGVhdHRyaWJ1dGVzZXQgZmlsZV90eXBlICh3ZWthZnNfdCApKQoodHlwZWF0dHJpYnV0ZXNldCBjaWxfZ2VuX3JlcXVpcmUgbm9uX3NlY3VyaXR5X2ZpbGVfdHlwZSkKKHR5cGVhdHRyaWJ1dGVzZXQgbm9uX3NlY3VyaXR5X2ZpbGVfdHlwZSAod2VrYWZzX3QgKSkKKHR5cGVhdHRyaWJ1dGVzZXQgY2lsX2dlbl9yZXF1aXJlIG5vbl9hdXRoX2ZpbGVfdHlwZSkKKHR5cGVhdHRyaWJ1dGVzZXQgbm9uX2F1dGhfZmlsZV90eXBlICh3ZWthZnNfdCApKQooYWxsb3cgY29udGFpbmVyX3Qgd2VrYWZzX3QgKGRpciAoYWRkX25hbWUgY3JlYXRlIGdldGF0dHIgaW9jdGwgbGluayBsb2NrIG9wZW4gcmVhZCByZW1vdmVfbmFtZSByZW5hbWUgcmVwYXJlbnQgcm1kaXIgc2VhcmNoIHNldGF0dHIgdW5saW5rIHdyaXRlKSkpCihhbGxvdyBjb250YWluZXJfdCB3ZWthZnNfdCAoZmlsZSAoY3JlYXRlIG9wZW4gZ2V0YXR0ciBzZXRhdHRyIHJlYWQgd3JpdGUgYXBwZW5kIHJlbmFtZSBsaW5rIHVubGluayBpb2N0bCBsb2NrKSkpCg== + verification: {} + mode: 0755 + systemd: + 
units: + - contents: | + [Unit] + Requires=systemd-udevd.target + After=NetworkManager.service + Before=sshd.service + DefaultDependencies=no + [Service] + Type=oneshot + ExecStart=/usr/sbin/semodule -i /etc/selinux/csi-wekafs-selinux.cil + [Install] + WantedBy=multi-user.target + name: csi-wekafs-selinux-policy.service + enabled: true diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/clusterrolebinding.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/clusterrolebinding.yaml new file mode 100644 index 00000000..fbb08fae --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +# Source: csi-wekafsplugin/templates/controllerserver-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-wekafsplugin-controller + labels: + app: csi-wekafsplugin-controller + component: csi-wekafsplugin-controller + release: csi-wekafsplugin +subjects: + - kind: ServiceAccount + name: csi-wekafsplugin-controller + namespace: csi-wekafsplugin +roleRef: + kind: ClusterRole + name: csi-wekafsplugin-controller + apiGroup: rbac.authorization.k8s.io diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/kustomization.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/kustomization.yaml new file mode 100644 index 00000000..6e78ec1e --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-controller/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - clusterrolebinding.yaml diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/clusterrolebinding.yaml 
b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/clusterrolebinding.yaml new file mode 100644 index 00000000..ca9c7125 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +# Source: csi-wekafsplugin/templates/nodeserver-clusterrolebinding.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-wekafsplugin-node + labels: + app: csi-wekafsplugin-node + component: csi-wekafsplugin-node + release: csi-wekafsplugin +subjects: + - kind: ServiceAccount + name: csi-wekafsplugin-node + namespace: csi-wekafsplugin +roleRef: + kind: ClusterRole + name: csi-wekafsplugin-node + apiGroup: rbac.authorization.k8s.io diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/kustomization.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/kustomization.yaml new file mode 100644 index 00000000..6e78ec1e --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - clusterrolebinding.yaml diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/clusterrole.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/clusterrole.yaml new file mode 100644 index 00000000..796d2b61 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/clusterrole.yaml @@ -0,0 +1,64 @@ +# Source: csi-wekafsplugin/templates/controllerserver-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-wekafsplugin-controller + labels: + app: csi-wekafsplugin-controller + component: 
csi-wekafsplugin-controller + release: csi-wekafsplugin +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update", "create", "get", "list", "watch", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] + - apiGroups: 
["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/kustomization.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/kustomization.yaml new file mode 100644 index 00000000..b9dcee18 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-controller/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - clusterrole.yaml diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/clusterrole.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/clusterrole.yaml new file mode 100644 index 00000000..b8b8ac51 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/clusterrole.yaml @@ -0,0 +1,34 @@ +# Source: csi-wekafsplugin/templates/nodeserver-clusterrole.yaml +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-wekafsplugin-node + labels: + app: csi-wekafsplugin-node + component: csi-wekafsplugin-node + release: csi-wekafsplugin +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["create", "delete", "get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: 
["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update"] diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/kustomization.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/kustomization.yaml new file mode 100644 index 00000000..b9dcee18 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/clusterroles/csi-wekafsplugin-node/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - clusterrole.yaml diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/kustomization.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/kustomization.yaml new file mode 100644 index 00000000..907a3073 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - rolebinding.yaml diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/rolebinding.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/rolebinding.yaml new file mode 100644 index 00000000..5d78ac12 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/rolebindings/csi-wekafsplugin-controller/rolebinding.yaml @@ -0,0 +1,18 @@ +# Source: csi-wekafsplugin/templates/controllerserver-rolebinding.yaml +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-wekafsplugin-controller + labels: + app: csi-wekafsplugin-controller + component: csi-wekafsplugin-controller + release: csi-wekafsplugin + namespace: csi-wekafsplugin +subjects: + - kind: ServiceAccount + name: csi-wekafsplugin-controller 
+ namespace: csi-wekafsplugin +roleRef: + kind: Role + name: csi-wekafsplugin-controller + apiGroup: rbac.authorization.k8s.io diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/kustomization.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/kustomization.yaml new file mode 100644 index 00000000..4f84bb68 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - role.yaml diff --git a/csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/role.yaml b/csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/role.yaml new file mode 100644 index 00000000..ae39c740 --- /dev/null +++ b/csi-wekafsplugin/base/rbac.authorization.k8s.io/roles/csi-wekafsplugin-controller/role.yaml @@ -0,0 +1,13 @@ +# Source: csi-wekafsplugin/templates/controllerserver-role.yaml +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-wekafsplugin-controller + labels: + app: csi-wekafsplugin-controller + component: csi-wekafsplugin-controller + release: csi-wekafsplugin +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] diff --git a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/kustomization.yaml b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/kustomization.yaml new file mode 100644 index 00000000..db4e7d81 --- /dev/null +++ b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - securitycontextconstraints.yaml diff --git 
a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml new file mode 100644 index 00000000..6a524deb --- /dev/null +++ b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml @@ -0,0 +1,25 @@ +# Source: csi-wekafsplugin/templates/controllerserver-security-context-constraint.yaml +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: csi-wekafsplugin-controller-scc +allowHostIPC: false +allowHostPID: false +allowPrivilegedContainer: true +allowHostDirVolumePlugin: true +allowHostNetwork: true +allowedVolumeTypes: + - hostPath + - secret +readOnlyRootFilesystem: false +allowHostPorts: true +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:csi-wekafsplugin:csi-wekafsplugin-controller diff --git a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/kustomization.yaml b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/kustomization.yaml new file mode 100644 index 00000000..db4e7d81 --- /dev/null +++ b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - securitycontextconstraints.yaml diff --git a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml new file mode 100644 index 
00000000..74c8f9fe --- /dev/null +++ b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml @@ -0,0 +1,25 @@ +# Source: csi-wekafsplugin/templates/nodeserver-security-context-constraint.yaml +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: csi-wekafsplugin-node-scc +allowHostIPC: false +allowHostPID: false +allowPrivilegedContainer: true +allowHostDirVolumePlugin: true +allowHostNetwork: true +allowedVolumeTypes: + - hostPath + - secret +readOnlyRootFilesystem: false +allowHostPorts: true +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:csi-wekafsplugin:csi-wekafsplugin-node diff --git a/csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/csidriver.yaml b/csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/csidriver.yaml new file mode 100644 index 00000000..845d151d --- /dev/null +++ b/csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/csidriver.yaml @@ -0,0 +1,11 @@ +# Source: csi-wekafsplugin/templates/driver.yaml +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: csi.weka.io +spec: + attachRequired: true + podInfoOnMount: true + volumeLifecycleModes: + - Persistent + fsGroupPolicy: File diff --git a/csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/kustomization.yaml b/csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/kustomization.yaml new file mode 100644 index 00000000..5b3bcee2 --- /dev/null +++ b/csi-wekafsplugin/base/storage.k8s.io/csidrivers/csi.weka.io/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - csidriver.yaml diff --git a/csi-wekafsplugin/overlays/nerc-ocp-test/csi-wekafs-api-secret.yaml b/csi-wekafsplugin/overlays/nerc-ocp-test/csi-wekafs-api-secret.yaml new file mode 100644 index 
00000000..c553f128 --- /dev/null +++ b/csi-wekafsplugin/overlays/nerc-ocp-test/csi-wekafs-api-secret.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: external-secrets.io/v1beta1 +kind: ExternalSecret +metadata: + annotations: + argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true + labels: + nerc.mghpcc.org/kustomized: "true" + name: csi-wekafs-api-secret + namespace: csi-wekafsplugin +spec: + dataFrom: + - extract: + key: nerc/nerc-ocp-test/weka/csi-wekafs-api-secret + secretStoreRef: + kind: SecretStore + name: nerc-secret-store + target: + name: csi-wekafs-api-secret + template: + metadata: + labels: {} diff --git a/csi-wekafsplugin/overlays/nerc-ocp-test/kustomization.yaml b/csi-wekafsplugin/overlays/nerc-ocp-test/kustomization.yaml new file mode 100644 index 00000000..04eb3950 --- /dev/null +++ b/csi-wekafsplugin/overlays/nerc-ocp-test/kustomization.yaml @@ -0,0 +1,8 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +commonLabels: + nerc.mghpcc.org/kustomized: "true" + +resources: + - ../../base + - csi-wekafs-api-secret.yaml From 443f9db0726234d9905dd08e9aff8c343fd09a7d Mon Sep 17 00:00:00 2001 From: Justin Riley Date: Tue, 19 Nov 2024 11:03:48 -0500 Subject: [PATCH 2/6] remove undefined allowedVolumeTypes property in scc Replaced this with volumes: ["*"] given that allowedVolumeTypes property doesn't exist. This is also the default value when the volumes property is not explicitly defined. 
--- .../securitycontextconstraints.yaml | 5 ++--- .../securitycontextconstraints.yaml | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml index 6a524deb..e542acac 100644 --- a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml +++ b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc/securitycontextconstraints.yaml @@ -8,9 +8,6 @@ allowHostPID: false allowPrivilegedContainer: true allowHostDirVolumePlugin: true allowHostNetwork: true -allowedVolumeTypes: - - hostPath - - secret readOnlyRootFilesystem: false allowHostPorts: true runAsUser: @@ -23,3 +20,5 @@ supplementalGroups: type: RunAsAny users: - system:serviceaccount:csi-wekafsplugin:csi-wekafsplugin-controller +volumes: + - '*' diff --git a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml index 74c8f9fe..786d683d 100644 --- a/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml +++ b/csi-wekafsplugin/base/security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc/securitycontextconstraints.yaml @@ -8,9 +8,6 @@ allowHostPID: false allowPrivilegedContainer: true allowHostDirVolumePlugin: true allowHostNetwork: true -allowedVolumeTypes: - - hostPath - - secret readOnlyRootFilesystem: false allowHostPorts: true runAsUser: @@ -23,3 +20,5 @@ supplementalGroups: type: RunAsAny users: - 
system:serviceaccount:csi-wekafsplugin:csi-wekafsplugin-node +volumes: + - '*' From 1e938775af3893ff902df80d9dc9e7d73da5503c Mon Sep 17 00:00:00 2001 From: Justin Riley Date: Tue, 19 Nov 2024 12:11:18 -0500 Subject: [PATCH 3/6] add weka storageclass for RWX volumes --- csi-wekafsplugin/base/kustomization.yaml | 1 + .../kustomization.yaml | 4 ++ .../storageclass.yaml | 39 +++++++++++++++++++ 3 files changed, 44 insertions(+) create mode 100644 csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/kustomization.yaml create mode 100644 csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/storageclass.yaml diff --git a/csi-wekafsplugin/base/kustomization.yaml b/csi-wekafsplugin/base/kustomization.yaml index ad55b59d..36593fd3 100644 --- a/csi-wekafsplugin/base/kustomization.yaml +++ b/csi-wekafsplugin/base/kustomization.yaml @@ -7,6 +7,7 @@ resources: - core/serviceaccounts/csi-wekafsplugin-node - core/serviceaccounts/csi-wekafsplugin-controller - storage.k8s.io/csidrivers/csi.weka.io + - storage.k8s.io/storageclasses/storageclass-wekafs-dir-api - security.openshift.io/securitycontextconstraints/csi-wekafsplugin-node-scc - security.openshift.io/securitycontextconstraints/csi-wekafsplugin-controller-scc - rbac.authorization.k8s.io/clusterrolebindings/csi-wekafsplugin-node diff --git a/csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/kustomization.yaml b/csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/kustomization.yaml new file mode 100644 index 00000000..23c2e0c6 --- /dev/null +++ b/csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - storageclass.yaml diff --git a/csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/storageclass.yaml 
b/csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/storageclass.yaml new file mode 100644 index 00000000..a097eaa2 --- /dev/null +++ b/csi-wekafsplugin/base/storage.k8s.io/storageclasses/storageclass-wekafs-dir-api/storageclass.yaml @@ -0,0 +1,39 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: storageclass-wekafs-dir-api +provisioner: csi.weka.io +reclaimPolicy: Delete +volumeBindingMode: Immediate +allowVolumeExpansion: true +parameters: + volumeType: dir/v1 + filesystemName: default + # optional parameters setting UID, GID and permissions on volume + # UID of the volume owner, default 0 (root) + # ownerUid: "1000" + + # GID of the volume owner, default 0 (root) + # ownerGid: "1000" + + # permissions in Unix octal format, default "0750" + # permissions: "0775" + + # capacity enforcement mode (either SOFT or HARD) + # - HARD or unspecified: pod will not be able to write above quota + # - SOFT: warning will be issued on Weka cluster, but writing will not be blocked + capacityEnforcement: HARD + # name of the secret that stores API credentials for a cluster + # change the name of secret to match secret of a particular cluster (if you have several Weka clusters) + csi.storage.k8s.io/provisioner-secret-name: &secretName csi-wekafs-api-secret + # change the name of the namespace in which the cluster API credentials + csi.storage.k8s.io/provisioner-secret-namespace: &secretNamespace csi-wekafsplugin + # do not change anything below this line, or set to same parameters as above + csi.storage.k8s.io/controller-publish-secret-name: *secretName + csi.storage.k8s.io/controller-publish-secret-namespace: *secretNamespace + csi.storage.k8s.io/controller-expand-secret-name: *secretName + csi.storage.k8s.io/controller-expand-secret-namespace: *secretNamespace + csi.storage.k8s.io/node-stage-secret-name: *secretName + csi.storage.k8s.io/node-stage-secret-namespace: *secretNamespace + 
csi.storage.k8s.io/node-publish-secret-name: *secretName + csi.storage.k8s.io/node-publish-secret-namespace: *secretNamespace From b56654cb2375f5952d54de10809b73c5720dc933 Mon Sep 17 00:00:00 2001 From: Justin Riley Date: Tue, 15 Jul 2025 15:40:48 -0400 Subject: [PATCH 4/6] WIP: add weka operator deployment --- weka-operator/client.yaml | 27 + weka-operator/ocp-driver-toolkit-images.yaml | 33 + weka-operator/rolebinding.yaml | 13 + weka-operator/run.sh | 27 + weka-operator/service.yaml | 14 + weka-operator/weka-client-sa.yaml | 11 + weka-operator/weka-operator/.helmignore | 23 + weka-operator/weka-operator/Chart.yaml | 10 + weka-operator/weka-operator/crds/.gitkeep | 0 .../crds/weka.weka.io_driveclaims.yaml | 75 + .../crds/weka.weka.io_wekaclients.yaml | 435 ++ .../crds/weka.weka.io_wekaclusters.yaml | 6384 +++++++++++++++++ .../crds/weka.weka.io_wekacontainers.yaml | 1878 +++++ .../weka.weka.io_wekamanualoperations.yaml | 299 + .../crds/weka.weka.io_wekapolicies.yaml | 287 + .../weka-operator/resources/cos-devenv.sh | 671 ++ .../weka-operator/resources/run-weka-cli.sh | 27 + .../weka-operator/resources/syslog-ng.conf | 60 + .../weka-operator/resources/weka_runtime.py | 2669 +++++++ .../weka-operator/templates/NOTES.txt | 8 + .../weka-operator/templates/_helpers.tpl | 62 + .../auth_proxy_client_clusterrole.yaml | 17 + .../templates/auth_proxy_role.yaml | 24 + .../templates/auth_proxy_role_binding.yaml | 19 + .../templates/auth_proxy_service.yaml | 20 + .../templates/client_editor_role.yaml | 31 + .../templates/client_viewer_role.yaml | 27 + .../templates/leader_election_role.yaml | 44 + .../leader_election_role_binding.yaml | 19 + .../maintenance_service_account.yaml | 11 + .../weka-operator/templates/manager.yaml | 247 + .../templates/metrics_daemonset.yaml | 121 + .../templates/node_describe_role.yaml | 15 + ...intenance_security_context_constraint.yaml | 23 + .../templates/ocp_node_tuning.yaml | 28 + .../ocp_security_context_constraint.yaml | 24 + 
.../templates/ocp_versions_configmap.yaml | 26 + .../weka-operator/templates/podmonitor.yaml | 33 + .../weka-operator/templates/role.yaml | 140 + .../weka-operator/templates/role_binding.yaml | 19 + .../templates/service_account.yaml | 11 + .../weka_boot_scripts_configmap.yaml | 13 + weka-operator/weka-operator/values.yaml | 166 + weka-operator/wekacontainer.yaml | 18 + 44 files changed, 14109 insertions(+) create mode 100644 weka-operator/client.yaml create mode 100644 weka-operator/ocp-driver-toolkit-images.yaml create mode 100644 weka-operator/rolebinding.yaml create mode 100644 weka-operator/run.sh create mode 100644 weka-operator/service.yaml create mode 100644 weka-operator/weka-client-sa.yaml create mode 100644 weka-operator/weka-operator/.helmignore create mode 100644 weka-operator/weka-operator/Chart.yaml create mode 100644 weka-operator/weka-operator/crds/.gitkeep create mode 100644 weka-operator/weka-operator/crds/weka.weka.io_driveclaims.yaml create mode 100644 weka-operator/weka-operator/crds/weka.weka.io_wekaclients.yaml create mode 100644 weka-operator/weka-operator/crds/weka.weka.io_wekaclusters.yaml create mode 100644 weka-operator/weka-operator/crds/weka.weka.io_wekacontainers.yaml create mode 100644 weka-operator/weka-operator/crds/weka.weka.io_wekamanualoperations.yaml create mode 100644 weka-operator/weka-operator/crds/weka.weka.io_wekapolicies.yaml create mode 100644 weka-operator/weka-operator/resources/cos-devenv.sh create mode 100644 weka-operator/weka-operator/resources/run-weka-cli.sh create mode 100644 weka-operator/weka-operator/resources/syslog-ng.conf create mode 100644 weka-operator/weka-operator/resources/weka_runtime.py create mode 100644 weka-operator/weka-operator/templates/NOTES.txt create mode 100644 weka-operator/weka-operator/templates/_helpers.tpl create mode 100644 weka-operator/weka-operator/templates/auth_proxy_client_clusterrole.yaml create mode 100644 weka-operator/weka-operator/templates/auth_proxy_role.yaml create 
mode 100644 weka-operator/weka-operator/templates/auth_proxy_role_binding.yaml create mode 100644 weka-operator/weka-operator/templates/auth_proxy_service.yaml create mode 100644 weka-operator/weka-operator/templates/client_editor_role.yaml create mode 100644 weka-operator/weka-operator/templates/client_viewer_role.yaml create mode 100644 weka-operator/weka-operator/templates/leader_election_role.yaml create mode 100644 weka-operator/weka-operator/templates/leader_election_role_binding.yaml create mode 100644 weka-operator/weka-operator/templates/maintenance_service_account.yaml create mode 100644 weka-operator/weka-operator/templates/manager.yaml create mode 100644 weka-operator/weka-operator/templates/metrics_daemonset.yaml create mode 100644 weka-operator/weka-operator/templates/node_describe_role.yaml create mode 100644 weka-operator/weka-operator/templates/ocp_maintenance_security_context_constraint.yaml create mode 100644 weka-operator/weka-operator/templates/ocp_node_tuning.yaml create mode 100644 weka-operator/weka-operator/templates/ocp_security_context_constraint.yaml create mode 100644 weka-operator/weka-operator/templates/ocp_versions_configmap.yaml create mode 100644 weka-operator/weka-operator/templates/podmonitor.yaml create mode 100644 weka-operator/weka-operator/templates/role.yaml create mode 100644 weka-operator/weka-operator/templates/role_binding.yaml create mode 100644 weka-operator/weka-operator/templates/service_account.yaml create mode 100644 weka-operator/weka-operator/templates/weka_boot_scripts_configmap.yaml create mode 100644 weka-operator/weka-operator/values.yaml create mode 100644 weka-operator/wekacontainer.yaml diff --git a/weka-operator/client.yaml b/weka-operator/client.yaml new file mode 100644 index 00000000..3fc0018c --- /dev/null +++ b/weka-operator/client.yaml @@ -0,0 +1,27 @@ +apiVersion: weka.weka.io/v1alpha1 +kind: WekaClient +metadata: + name: weka-clients + namespace: weka-operator-system +spec: + serviceAccountName: 
weka-client + image: quay.io/weka.io/weka-in-container:4.4.5.128-k8s + imagePullSecret: weka-quayio-creds + driversDistService: "https://weka-driver-builder:60002" + port: 45001 + agentPort: 45000 + network: + #ethDevice: bond0.2175 + udpMode: true + deviceSubnets: + - 10.30.12.0/24 + coresNum: 2 + joinIpPorts: + - 10.30.11.4:14000 + - 10.30.11.5:14000 + - 10.30.11.6:14000 + nodeSelector: + node-role.kubernetes.io/worker: "" + tracesConfiguration: + maxCapacityPerIoNode: 10 + ensureFreeSpace: 50 diff --git a/weka-operator/ocp-driver-toolkit-images.yaml b/weka-operator/ocp-driver-toolkit-images.yaml new file mode 100644 index 00000000..a909678a --- /dev/null +++ b/weka-operator/ocp-driver-toolkit-images.yaml @@ -0,0 +1,33 @@ +apiVersion: v1 +data: + 414.92.202404162000-0: 259cbd840454c6d9030c21a5d24be0599abc4941cdd525a80f6eeb5d67e7908c + 414.92.202404231906-0: 20dce872df7c233a34179b4356acc1c6cbd80c56cff053fa437de3b6595f9710 + 414.92.202404301839-0: 20dce872df7c233a34179b4356acc1c6cbd80c56cff053fa437de3b6595f9710 + 415.92.202402201450-0: cd0ea5d8ec43c5b03bf362e0b595bafe3e97e222d4344a851453ebe8770df135 + 415.92.202403061641-0: 9d974e00ebe924fbd03abf03c55d873108a1593b5a5e60f0daf4b867fc5bb1b1 + 415.92.202403080220-0: 9d974e00ebe924fbd03abf03c55d873108a1593b5a5e60f0daf4b867fc5bb1b1 + 415.92.202403191241-0: abbff60a77f7ac2276dbeef33fb46ed32c9b9eb1c5813260c6383605bed76a08 + 415.92.202403270524-0: b9cd86347ba410c90b4a34fe9c1b25951e0f0cd38ceca1d3ccd4bae96f084edb + 415.92.202404161628-0: be818782c507d76b48f9f37bcf85e5d5311514ff9e6108b047f80bf6331e63f5 + 415.92.202404251009-0: bae8035c05d095e84c62efcab6202330a98493cab03e091c81a0b792afb5672c + 415.92.202404302054-0: bae8035c05d095e84c62efcab6202330a98493cab03e091c81a0b792afb5672c + 415.92.202405070140-0: 985b72435a7091702a520581eb51ebd439bfe6ff39c33ffaaad7e30b9e321454 + 415.92.202405130844-0: 985b72435a7091702a520581eb51ebd439bfe6ff39c33ffaaad7e30b9e321454 + 415.92.202405201956-0: 
934af754e2fbc8ed5deb7c4b22299c6c7b4504e6d8d9fd50fc3ad374616d70a9 + 415.92.202405281402-0: d493e0bd8959e0d117493453db9c183e8bca34f73eb89b091134a087554fa0e8 + 415.92.202406041802-0: 9d2c61bf746c966f71bc6c6a3797303a7d3bfaef09040dfde85f19383d19681b + 415.92.202406111137-0: efa99ae171e7db22aa2d320b7bc78e950db01987889b6a8529e1945670e80792 + 416.94.202406172220-0: dde3cd6a75d865a476aa7e1cab6fa8d97742401e87e0d514f3042c3a881e301f + 416.94.202406251923-0: 8ef92caba7bd5d6ab3a139da782bf5651c2a40802eaa33b0c7899a7e897e007b + 416.94.202407030122-0: e5e6de7572003ac560f113a0082594a585c49d51801f028f699b15262eff7c02 + 416.94.202407081958-0: a73204d0c03454b02656801ca4c49cb2d8b0d54645bb90f74622df335c82dce1 + 417.94.202502111408-0: 58132e0e30cff950293e940e3188f0e74fba91fd60ade4908239d11a0e9d988c +kind: ConfigMap +metadata: + annotations: + meta.helm.sh/release-name: weka-operator + meta.helm.sh/release-namespace: weka-operator-system + labels: + app.kubernetes.io/managed-by: Helm + name: ocp-driver-toolkit-images + namespace: weka-operator-system diff --git a/weka-operator/rolebinding.yaml b/weka-operator/rolebinding.yaml new file mode 100644 index 00000000..48a3150d --- /dev/null +++ b/weka-operator/rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: weka-client-privileged + namespace: weka-operator-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:openshift:scc:privileged +subjects: +- kind: ServiceAccount + name: weka-client + namespace: weka-operator-system diff --git a/weka-operator/run.sh b/weka-operator/run.sh new file mode 100644 index 00000000..a8ebea5c --- /dev/null +++ b/weka-operator/run.sh @@ -0,0 +1,27 @@ +#!/bin/bash +set -ex + +#kubectl create ns weka-operator-system --as=system:admin +# +#kubectl create --as=system:admin secret docker-registry weka-quayio-creds \ +# --docker-server=quay.io \ +# --docker-username=$QUAY_USERNAME \ +# --docker-password=$QUAY_PASSWORD \ +# 
--docker-email=$QUAY_USERNAME \ +# --namespace=weka-operator-system + + +#helm upgrade --install weka-operator oci://quay.io/weka.io/helm/weka-operator \ +# --namespace weka-operator-system \ +# --version v1.6.2 \ +# --set nodeAgent.persistencePaths=/root/k8s-weka \ +# --set ocpCompatibility.hugepageConfiguration.enabled=true \ +# --set ocpCompatibility.hugepageConfiguration.hugepagesCount=4000 \ +# --set debugSleep=3600 + +helm upgrade --install weka-operator oci://quay.io/weka.io/helm/weka-operator \ + --namespace weka-operator-system \ + --version v1.7.0 \ + --set nodeAgent.persistencePaths=/root/k8s-weka \ + --set ocpCompatibility.hugepageConfiguration.enabled=true \ + --set ocpCompatibility.hugepageConfiguration.hugepagesCount=4000 diff --git a/weka-operator/service.yaml b/weka-operator/service.yaml new file mode 100644 index 00000000..5feb4cf8 --- /dev/null +++ b/weka-operator/service.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: weka-driver-builder + namespace: weka-operator-system +spec: + type: ClusterIP + ports: + - name: weka-driver-builder + port: 60002 + targetPort: 60002 + selector: + app: weka-driver-builder diff --git a/weka-operator/weka-client-sa.yaml b/weka-operator/weka-client-sa.yaml new file mode 100644 index 00000000..791b6373 --- /dev/null +++ b/weka-operator/weka-client-sa.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +imagePullSecrets: +- name: weka-client-dockercfg-n9ptb +kind: ServiceAccount +metadata: + annotations: + openshift.io/internal-registry-pull-secret-ref: weka-client-dockercfg-n9ptb + name: weka-client + namespace: weka-operator-system +secrets: +- name: weka-client-dockercfg-n9ptb diff --git a/weka-operator/weka-operator/.helmignore b/weka-operator/weka-operator/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/weka-operator/weka-operator/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/weka-operator/weka-operator/Chart.yaml b/weka-operator/weka-operator/Chart.yaml new file mode 100644 index 00000000..0bd32717 --- /dev/null +++ b/weka-operator/weka-operator/Chart.yaml @@ -0,0 +1,10 @@ +apiVersion: v2 +appVersion: 4.2.7.815-d3e193ee4ceb15033f5207b466815c3c +description: A Helm chart for Kubernetes +maintainers: +- email: csi@weka.io + name: weka + url: https://weka.io +name: weka-operator +type: application +version: v1.6.2 diff --git a/weka-operator/weka-operator/crds/.gitkeep b/weka-operator/weka-operator/crds/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/weka-operator/weka-operator/crds/weka.weka.io_driveclaims.yaml b/weka-operator/weka-operator/crds/weka.weka.io_driveclaims.yaml new file mode 100644 index 00000000..36b866e8 --- /dev/null +++ b/weka-operator/weka-operator/crds/weka.weka.io_driveclaims.yaml @@ -0,0 +1,75 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: driveclaims.weka.weka.io +spec: + group: weka.weka.io + names: + kind: DriveClaim + listKind: DriveClaimList + plural: driveclaims + singular: driveclaim + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Owner + jsonPath: .spec.owner + name: Owner + type: string + - description: Device + jsonPath: .spec.device + name: Device + type: string + - description: Drive UUID + jsonPath: .spec.driveUuid + name: Drive UUID + priority: 1 + type: string + - description: Creation time + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: 
properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + device: + type: string + driveUuid: + type: string + owner: + type: string + required: + - device + - driveUuid + - owner + type: object + status: + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/weka-operator/weka-operator/crds/weka.weka.io_wekaclients.yaml b/weka-operator/weka-operator/crds/weka.weka.io_wekaclients.yaml new file mode 100644 index 00000000..92d307b5 --- /dev/null +++ b/weka-operator/weka-operator/crds/weka.weka.io_wekaclients.yaml @@ -0,0 +1,435 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: wekaclients.weka.weka.io +spec: + group: weka.weka.io + names: + kind: WekaClient + listKind: WekaClientList + plural: wekaclients + singular: wekaclient + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Resource status + jsonPath: .status.status + name: Status + type: string + - description: Name of the target cluster if exists + jsonPath: .spec.targetCluster.name + name: Target Cluster + type: string + - description: Number of cores + jsonPath: .spec.coresNum + name: Cores + type: integer + - description: IPs of the target cluster 
+ jsonPath: .spec.joinIpPorts + name: Join IPs + priority: 1 + type: string + - description: 'Number of client containers: Active/Created/Desired' + jsonPath: .status.printer.containers + name: Containers(A/C/D) + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: WekaClient is the Schema for the clients API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WekaClientSpec defines the desired state of WekaClient + properties: + additionalMemory: + description: memory to add/decrease from "auto-calculated" memory + type: integer + agentPort: + description: if not set (0), weka will find a free port from the portRange + type: integer + allowHotUpgrade: + type: boolean + autoRemoveTimeout: + default: 24h + description: sets weka cluster-side timeout, if client is not coming + back in specified duration it will be auto removed from cluster + config + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+)$ + type: string + coreIds: + items: + type: integer + type: array + coresNum: + type: integer + cpuPolicy: + default: auto + enum: + - auto + - shared + - dedicated + - dedicated_ht + - manual + type: string + cpuRequest: + type: string + csiGroup: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in production: + if set, allows to reuse the same 
csi resources for multiple clients' + type: string + driversDistService: + type: string + driversLoaderImage: + type: string + globalPVC: + properties: + name: + type: string + path: + type: string + required: + - name + type: object + hugepages: + description: hugepages, value in megabytes + type: integer + hugepagesOffset: + description: value in megabytes to offset + type: integer + image: + description: full container image in format of quay.io/weka.io/weka-in-container:VERSION + pattern: ^.+:\d+\.\d+\.\d+.*$ + type: string + imagePullSecret: + type: string + joinIpPorts: + items: + type: string + type: array + network: + properties: + aws: + properties: + deviceSlots: + description: should provide list of additional nics indexes + starting from 1, index 0 is reserved for kernel networking + items: + type: integer + type: array + type: object + deviceSubnets: + description: subnet that is used for devices auto-discovery + items: + pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$ + type: string + type: array + ethDevice: + type: string + ethDevices: + items: + type: string + type: array + ethSlots: + items: + type: string + type: array + gateway: + type: string + udpMode: + type: boolean + type: object + nodeSelector: + additionalProperties: + type: string + type: object + overrides: + properties: + forceDrain: + description: unsafe operation, forces drain on the node where + the container is running, should not be used unless instructed + explicitly by weka personnel, the effect of drain is throwing + away all IOs and acknowledging all umounts in unsafe manner + type: boolean + machineIdentifierNodeRef: + description: used to override machine identifier node reference + for client containers + type: string + skipActiveMountsCheck: + description: option to skip active mounts check before deleting + client containers + type: boolean + umountOnHost: + description: unsafe operation, runs nsenter in root namespace + to umount all wekafs mounts visible on host + 
type: boolean + type: object + port: + description: if not set (0), weka will find a free port from the portRange + type: integer + portRange: + description: used for dynamic port allocation + properties: + basePort: + default: 45000 + maximum: 65535 + type: integer + portRange: + default: 0 + description: |- + number of ports to check for availability + if 0 - will check all ports from basePort to 65535 + type: integer + required: + - basePort + type: object + rawTolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + resources: + description: 'experimental: pod resources to be proxied as-is to the + pod spec' + properties: + limits: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + serviceAccountName: + type: string + targetCluster: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + tolerations: + items: + type: string + type: array + tracesConfiguration: + description: TraceConfiguration defines the configuration for the + traces, accepts parameters in gigabytes + properties: + dumperConfigMode: + default: auto + enum: + - override + - partial-override + - auto + - cluster + type: string + ensureFreeSpace: + default: 20 + type: integer + maxCapacityPerIoNode: + default: 10 + type: integer + type: object + upgradePolicy: + properties: + type: + default: all-at-once + enum: + - manual + - all-at-once + - rolling + - all-at-once-force + type: string + type: object + wekaHome: + properties: + cacertSecret: + type: string + type: object + wekaHomeConfig: + description: DEPRECATED, kept for 
compatibility with old API clients, + not taking any action, to be removed on new API version + properties: + cacertSecret: + type: string + type: object + wekaSecretRef: + type: string + required: + - image + type: object + status: + description: WekaClientStatus defines the observed state of WekaClient + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastAppliedSpec: + type: string + printer: + properties: + containers: + type: string + type: object + stats: + properties: + containers: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + type: object + status: + default: Init + enum: + - Init + - Running + - Upgrading + - Destroying + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/weka-operator/weka-operator/crds/weka.weka.io_wekaclusters.yaml b/weka-operator/weka-operator/crds/weka.weka.io_wekaclusters.yaml new file mode 100644 index 00000000..0aa0b698 --- /dev/null +++ b/weka-operator/weka-operator/crds/weka.weka.io_wekaclusters.yaml @@ -0,0 +1,6384 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: wekaclusters.weka.weka.io +spec: + group: weka.weka.io + names: + kind: WekaCluster + listKind: WekaClusterList + plural: wekaclusters + singular: wekacluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of the cluster + jsonPath: .status.status + name: Status + type: string + - description: Weka cluster GUID + jsonPath: .status.clusterID + name: Cluster ID + type: string + - description: 'Number of compute containers: Active/Created/Desired' + jsonPath: .status.printer.computeContainers + name: CCT(A/C/D) + type: string + - description: 'Number of drive containers: Active/Created/Desired' + jsonPath: .status.printer.driveContainers + name: DCT(A/C/D) + type: string + - description: 'Number of Drives: Active/Created/Desired' + jsonPath: 
.status.printer.drives + name: DRVS(A/C/D) + type: string + - description: IOPS Read/Write/Metadata + jsonPath: .status.printer.iops + name: IOPS(R/W/M) + priority: 1 + type: string + - description: Throughput Read/Write + jsonPath: .status.printer.throughput + name: THRPT(R/W) + priority: 1 + type: string + - description: Filesystem Capacity + jsonPath: .status.printer.filesystemCapacity + name: FS(Capacity) + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WekaClusterSpec defines the desired state of WekaCluster + properties: + additionalMemory: + description: additional memory to allocate for weka containers + properties: + compute: + type: integer + drive: + type: integer + envoy: + type: integer + nfs: + type: integer + s3: + type: integer + type: object + bucketRaftSize: + description: size of raft for buckets, defaults to 5, 5/9 are supported + type: integer + cpuPolicy: + default: auto + description: cpu policy to use for scheduling cores for weka, unless + instructed by weka team, keep default of auto + enum: + - auto + - shared + - dedicated + - dedicated_ht + - manual + type: string + csiConfig: + properties: + csiDriverName: + description: EXPERIMENTAL, ALPHA STATE, should not be used in + production + type: string + endpointsSubnets: + items: + type: string + type: array + type: object + driversDistService: + description: endpoint for distribution service, global https://drivers.weka.io + or in-k8s-cluster "https://weka-drivers-dist.namespace.svc.cluster.local:60001" + type: string + dynamicTemplate: + description: weka cluster topology configuration + properties: + computeContainers: + type: integer + computeCores: + type: integer + computeHugepages: + type: integer + computeHugepagesOffset: + type: integer + driveContainers: + type: integer + driveCores: + type: integer + driveHugepages: + type: integer + driveHugepagesOffset: + type: integer + envoyCores: + type: integer + nfsContainers: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in + production: number of NFS containers' + type: integer + nfsCores: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in + production: number of NFS cores per container' + type: integer + nfsExtraCores: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in + 
production: number of NFS extra cores per container' + type: integer + nfsFrontendHugepages: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in + production: hugepage allocation for NFS frontend' + type: integer + nfsFrontendHugepagesOffset: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in + production: hugepage offset for NFS frontend' + type: integer + numDrives: + type: integer + s3Containers: + type: integer + s3Cores: + type: integer + s3ExtraCores: + type: integer + s3FrontendHugepages: + type: integer + s3FrontendHugepagesOffset: + type: integer + type: object + expandEndpoints: + description: endpoint of existing weka cluster, containers created + for this k8s-driver cluster will join existing weka cluster, used + in flow of migration + items: + type: string + type: array + failureDomain: + description: failure domain configuration for weka containers + properties: + compositeLabels: + description: |- + If multiple labels are specified, the failure domain will be the combination of the labels. + If `compositeLabels` is set, `label` and `skew` will be ignored. 
+ When using compositeLabels, weka containers will be spread considering all labels + with best effort, but even distribution is not guaranteed + items: + type: string + type: array + label: + description: |- + label used for spreading the weka containers across different failure domains (if set) + nodes that have the same value for the label will be considered as a single failure domain + type: string + skew: + description: |- + skew for the failure domain, if set, the weka containers will be spread with the skew in mind + (only applicable if `label` is set) + type: integer + type: object + globalPVC: + properties: + name: + type: string + path: + type: string + required: + - name + type: object + gracefulDestroyDuration: + default: 24h + description: |- + During this period the cluster will not be destroyed (protection from accidental deletion) + Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See + https://bugzilla.redhat.com/show_bug.cgi?id=2050332 + https://github.com/kubernetes/apimachinery/issues/131 + https://github.com/kubernetes/apiextensions-apiserver/issues/56 + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+)$ + type: string + hotSpare: + default: 0 + description: |- + A hot spare is reserved capacity designed to handle data rebuilds while maintaining the system's net capacity, even in the event of failure domains being lost + See: https://docs.weka.io/weka-system-overview/ssd-capacity-management#hot-spare + type: integer + image: + description: full container image name in format of quay.io/weka.io/weka-in-container:VERSION + pattern: ^.+:\d+\.\d+\.\d+.*$ + type: string + imagePullSecret: + description: image pull secret to use for pulling the image + type: string + ipv6: + description: use ipv6 for weka cluster networking configuration + type: boolean + leadershipRaftSize: + description: size of raft for leadership, defaults to 5, 5/9 are supported + type: integer + network: + description: weka 
cluster network configuration + properties: + aws: + properties: + deviceSlots: + description: should provide list of additional nics indexes + starting from 1, index 0 is reserved for kernel networking + items: + type: integer + type: array + type: object + deviceSubnets: + description: subnet that is used for devices auto-discovery + items: + pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$ + type: string + type: array + ethDevice: + type: string + ethDevices: + items: + type: string + type: array + ethSlots: + items: + type: string + type: array + gateway: + type: string + udpMode: + type: boolean + type: object + nodeSelector: + additionalProperties: + type: string + description: node selector for the weka containers + type: object + operatorSecretRef: + description: reference to the secret containing the weka system credentials + used by operator, used in flow of migration + type: string + overrides: + properties: + allowS3ClusterDestroy: + type: boolean + disregardRedundancy: + description: disregard redundancy constraints, useful for testing, + should not be used in production as misaligns failure domains + type: boolean + driversLoaderImage: + description: image to be used for loading drivers, do not use + unless explicitly instructed by Weka team + type: string + forceAio: + description: force weka to use drives in aio mode and not direct + nvme (impacts performance, but might serve as a fallback in + case of incompatible device) + type: boolean + postFormClusterScript: + description: script to run post cluster create (i.e before starting + io) + type: string + upgradeAllAtOnce: + description: unsafe operation, should not be used unless instructed + explicitly by weka personnel + type: boolean + upgradeForceReplace: + description: unsafe operation, skips graceful stop of weka container + for a quick replacement to a new image, should not be used unless + instructed explicitly by weka personnel + type: boolean + upgradeForceReplaceDrives: + description: 
unsafe operation, skips graceful stop of drive weka + container for a quick replacement to a new image, should not + be used unless instructed explicitly by weka personnel + type: boolean + upgradePausePreCompute: + description: Prevent from moving into compute phase + type: boolean + upgradePaused: + description: Pause upgrade + type: boolean + type: object + podConfig: + description: advanced pod affinities configuration + properties: + affinity: + description: advanced scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for + the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with + the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. 
+ type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the + corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, etc. + as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + roleAffinity: + description: |- + affinity per container role + takes precedence over the `affinity` field + properties: + compute: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
+ properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + drive: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. 
A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. 
+ The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + nfs: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + s3: + description: Affinity is a group of affinity scheduling rules. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules + for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. 
+ for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated + with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. 
+ properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching + the corresponding nodeSelectorTerm, in the + range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector + terms. The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. 
+ items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the + selector applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. + co-locate this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. 
+ properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. 
+ If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules + (e.g. avoid putting this pod in the same node, zone, + etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred + node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, + associated with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. 
The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + type: object + roleTopologySpreadConstraints: + description: takes precedence over the `topologySpreadConstraints` + properties: + compute: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + drive: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + nfs: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + s3: + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + topologySpreadConstraints: + description: controls the distribution of weka containers across + the failure domains + items: + description: TopologySpreadConstraint specifies how to spread + matching pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. 
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. + format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. 
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. 
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + type: object + ports: + description: port allocation for weka containers, if not set, free + range will be auto selected. Currently allocated ports can be seen + in wekacluster.status.ports + properties: + basePort: + description: |- + We should not be updating Spec, as it's a user interface and we should not break ability to update spec file + Therefore, when BasePort is 0, and Range as 0, we have application level defaults that will be written in here + type: integer + lbAdminPort: + type: integer + lbPort: + type: integer + portRange: + type: integer + s3Port: + type: integer + type: object + rawTolerations: + description: tolerations in standard k8s format + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. 
+ Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + redundancyLevel: + description: storage capacity dedicated to system protection (2/4). + https://docs.weka.io/weka-system-overview/ssd-capacity-management#protection-level + type: integer + roleNodeSelector: + description: node selector for the weka containers per role, overrides + global nodeSelector + properties: + compute: + additionalProperties: + type: string + description: node selector for compute weka containers + type: object + drive: + additionalProperties: + type: string + description: node selector for drive weka containers + type: object + nfs: + additionalProperties: + type: string + description: node selector for nfs weka containers + type: object + s3: + additionalProperties: + type: string + description: node selector for s3 weka containers, envoy will + be scheduled by affinity to s3 and not explicit node selector + type: object + type: object + serviceAccountName: + type: string + startIoConditions: + description: conditions that must be met before starting IO + properties: + minNumDrives: + description: minimum number of drives that should be added to + the cluster before starting IO + type: integer + type: object + stripeWidth: + description: stripe width is the number of blocks within a common + protection set, ranging 
from 3 to 16 https://docs.weka.io/weka-system-overview/ssd-capacity-management#stripe-width + type: integer + template: + default: dynamic + description: A template/strategy of how to build a cluster, right + now only "dynamic" supported, explicitly specifying config of a + cluster + type: string + tolerations: + description: simplified tolerations, checked only by key existence, + expanding to NoExecute|NoSchedule tolerations + items: + type: string + type: array + tracesConfiguration: + description: traces capacities configuration for weka containers + properties: + dumperConfigMode: + default: auto + enum: + - override + - partial-override + - auto + - cluster + type: string + ensureFreeSpace: + default: 20 + type: integer + maxCapacityPerIoNode: + default: 10 + type: integer + type: object + wekaHome: + description: weka home configuration + properties: + allowInsecure: + type: boolean + cacertSecret: + type: string + enableStats: + type: boolean + endpoint: + type: string + type: object + required: + - image + type: object + status: + description: WekaClusterStatus defines the observed state of WekaCluster + properties: + clusterID: + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. 
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastAppliedImage: + type: string + lastAppliedSpec: + type: string + ports: + properties: + basePort: + description: |- + We should not be updating Spec, as it's a user interface and we should not break ability to update spec file + Therefore, when BasePort is 0, and Range as 0, we have application level defaults that will be written in here + type: integer + lbAdminPort: + type: integer + lbPort: + type: integer + portRange: + type: integer + s3Port: + type: integer + type: object + printer: + properties: + computeContainers: + type: string + driveContainers: + type: string + drives: + type: string + filesystemCapacity: + description: 'Information about filesystem capacity: Available/Used' + type: string + iops: + type: string + throughput: + type: string + type: object + spanId: + type: string + 
stats: + properties: + alertsCount: + format: int64 + type: integer + capacity: + properties: + hotSpareBytes: + format: int64 + type: integer + totalBytes: + format: int64 + type: integer + unavailableBytes: + format: int64 + type: integer + unprovisionedBytes: + format: int64 + type: integer + type: object + clusterStatus: + type: string + containers: + properties: + compute: + properties: + cpuUtilization: + type: string + numContainers: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + processes: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + type: object + drive: + properties: + cpuUtilization: + type: string + numContainers: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + processes: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + type: object + nfs: + properties: + cpuUtilization: + type: string + numContainers: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + processes: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + type: object + s3: + properties: + cpuUtilization: + type: string + numContainers: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + processes: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + 
type: object + type: object + type: object + drives: + properties: + counters: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + failures: + items: + properties: + serialId: + type: string + wekaDriveId: + type: string + type: object + type: array + type: object + filesystem: + description: FilesystemMetrics contains metrics about filesystem + usage + properties: + activeObsBucketCount: + format: int64 + type: integer + hasTieredFilesystems: + description: Object Store metrics + type: boolean + obsBucketCount: + format: int64 + type: integer + totalAvailableCapacity: + description: TotalAvailableCapacity is the difference between + TotalProvisionedCapacity and TotalUsedCapacity + format: int64 + type: integer + totalAvailableSSDCapacity: + format: int64 + type: integer + totalObsCapacity: + format: int64 + type: integer + totalProvisionedCapacity: + description: TotalProvisionedCapacity is the sum of total_budget + for all filesystems + format: int64 + type: integer + totalProvisionedSSDCapacity: + description: SSD-specific metrics + format: int64 + type: integer + totalUsedCapacity: + description: TotalUsedCapacity is the sum of used_total for + all filesystems + format: int64 + type: integer + totalUsedSSDCapacity: + format: int64 + type: integer + type: object + ioStats: + properties: + iops: + properties: + metadata: + format: int64 + type: integer + read: + format: int64 + type: integer + total: + format: int64 + type: integer + write: + format: int64 + type: integer + required: + - metadata + - read + - total + - write + type: object + throughput: + properties: + read: + format: int64 + type: integer + write: + format: int64 + type: integer + required: + - read + - write + type: object + type: object + lastUpdate: + format: date-time + type: string + numFailures: + additionalProperties: + type: string + type: object + type: object + status: + type: 
string + timestamps: + additionalProperties: + format: date-time + type: string + type: object + traceId: + type: string + required: + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/weka-operator/weka-operator/crds/weka.weka.io_wekacontainers.yaml b/weka-operator/weka-operator/crds/weka.weka.io_wekacontainers.yaml new file mode 100644 index 00000000..4915b594 --- /dev/null +++ b/weka-operator/weka-operator/crds/weka.weka.io_wekacontainers.yaml @@ -0,0 +1,1878 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: wekacontainers.weka.weka.io +spec: + group: weka.weka.io + names: + kind: WekaContainer + listKind: WekaContainerList + plural: wekacontainers + singular: wekacontainer + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Weka container status + jsonPath: .status.status + name: Status + type: string + - description: Weka container mode + jsonPath: .spec.mode + name: Mode + type: string + - description: Management IPs + jsonPath: .status.printer.managementIPs + name: Management IPs + type: string + - description: Node affinity of container + jsonPath: .status.printer.nodeAffinity + name: Node + type: string + - description: Number of processes per state + jsonPath: .status.printer.processes + name: Processes + priority: 1 + type: string + - description: Number of drives per state + jsonPath: .status.printer.drives + name: Drives + priority: 1 + type: string + - description: Number of active mounts + jsonPath: .status.printer.activeMounts + name: Mounts + priority: 1 + type: string + - description: CPU Utilization + jsonPath: .status.stats.cpuUtilization + name: CPU + priority: 1 + type: string + - description: Time since creation + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Weka container ID + jsonPath: .status.containerID + 
name: Weka cID + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + additionalMemory: + type: integer + additionalSecrets: + additionalProperties: + type: string + type: object + affinity: + description: advanced scheduling constraints + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the + pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node matches the corresponding matchExpressions; the + node(s) with the highest sum are the most preferred. + items: + description: |- + An empty preferred scheduling term matches all objects with implicit weight 0 + (i.e. it's a no-op). 
A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the + corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding + nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to an update), the system + may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. + The terms are ORed. + items: + description: |- + A null or empty node selector term matches no objects. The requirements of + them are ANDed. + The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements + by node's labels. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. 
If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchFields: + description: A list of node selector requirements + by node's fields. + items: + description: |- + A node selector requirement is a selector that contains values, a key, and an operator + that relates the key and values. + properties: + key: + description: The label key that the selector + applies to. + type: string + operator: + description: |- + Represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: |- + An array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. If the operator is Gt or Lt, the values + array must have a single element, which will be interpreted as an integer. + This array is replaced during a strategic merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + type: object + x-kubernetes-map-type: atomic + type: array + x-kubernetes-list-type: atomic + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate + this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. 
+ Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. 
+ null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. + avoid putting this pod in the same node, zone, etc. as some + other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: |- + The scheduler will prefer to schedule pods to nodes that satisfy + the anti-affinity expressions specified by this field, but it may choose + a node that violates one or more of the expressions. The node that is + most preferred is the one with the greatest sum of weights, i.e. + for each node that meets all of the scheduling requirements (resource + request, requiredDuringScheduling anti-affinity expressions, etc.), + compute a sum by iterating through the elements of this field and adding + "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm + fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated + with the corresponding weight. + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. 
+ Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that + the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: |- + weight associated with matching the corresponding podAffinityTerm, + in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + x-kubernetes-list-type: atomic + requiredDuringSchedulingIgnoredDuringExecution: + description: |- + If the anti-affinity requirements specified by this field are not met at + scheduling time, the pod will not be scheduled onto the node. + If the anti-affinity requirements specified by this field cease to be met + at some point during pod execution (e.g. due to a pod label update), the + system may or may not try to eventually evict the pod from its node. + When there are multiple elements, the lists of nodes corresponding to each + podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: |- + Defines a set of pods (namely those matching the labelSelector + relative to the given namespace(s)) that this pod should be + co-located (affinity) or not co-located (anti-affinity) with, + where co-located is defined as running on a node whose value of + the label with key matches that of any node on which + a pod of the set of pods is running + properties: + labelSelector: + description: |- + A label query over a set of resources, in this case pods. + If it's null, this PodAffinityTerm matches with no Pods. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key in (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both matchLabelKeys and labelSelector. + Also, matchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + description: |- + MismatchLabelKeys is a set of pod label keys to select which pods will + be taken into consideration. The keys are used to lookup values from the + incoming pod labels, those key-value labels are merged with `labelSelector` as `key notin (value)` + to select the group of existing pods which pods will be taken into consideration + for the incoming pod's pod (anti) affinity. 
Keys that don't exist in the incoming + pod labels will be ignored. The default value is empty. + The same key is forbidden to exist in both mismatchLabelKeys and labelSelector. + Also, mismatchLabelKeys cannot be set when labelSelector isn't set. + This is a beta field and requires enabling MatchLabelKeysInPodAffinity feature gate (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + namespaceSelector: + description: |- + A label query over the set of namespaces that the term applies to. + The term is applied to the union of the namespaces selected by this field + and the ones listed in the namespaces field. + null selector and null or empty namespaces list means "this pod's namespace". + An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: |- + namespaces specifies a static list of namespace names that the term applies to. + The term is applied to the union of the namespaces listed in this field + and the ones selected by namespaceSelector. + null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + x-kubernetes-list-type: atomic + topologyKey: + description: |- + This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching + the labelSelector in the specified namespaces, where co-located is defined as running on a node + whose value of the label with key topologyKey matches that of any node on which any of the + selected pods is running. + Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + x-kubernetes-list-type: atomic + type: object + type: object + agentPort: + type: integer + allowHotUpgrade: + type: boolean + autoRemoveTimeout: + default: 0s + description: sets weka cluster-side timeout, if client is not coming + back in specified duration it will be auto removed from cluster + config + pattern: ^(0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+)$ + type: string + coreIds: + items: + type: integer + type: array + cpuPolicy: + default: auto + enum: + - auto + - shared + - dedicated + - dedicated_ht + - manual + type: string + csiDriverName: + description: 'EXPERIMENTAL, ALPHA STATE, should not be used in production: + computed csi driver name for client container to ensure csi-node' + type: string + driversDistService: + type: string + driversLoaderImage: + type: string + dropAffinityConstraints: + type: boolean + exposePorts: + description: deprecated, use ExposedPorts instead + items: + type: integer + type: array + exposedPorts: + description: ports to be exposed on the container, proxied to pod + items: + description: ContainerPort represents a network port in a single + container. + properties: + containerPort: + description: |- + Number of port to expose on the pod's IP address. + This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: |- + Number of port to expose on the host. + If specified, this must be a valid port number, 0 < x < 65536. + If HostNetwork is specified, this must match ContainerPort. + Most containers do not need this. + format: int32 + type: integer + name: + description: |- + If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + named port in a pod must have a unique name. Name for the port that can be + referred to by services. 
+ type: string + protocol: + default: TCP + description: |- + Protocol for port. Must be UDP, TCP, or SCTP. + Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + extraCores: + type: integer + failureDomain: + description: failure domain configuration + properties: + compositeLabels: + description: |- + If multiple labels are specified, the failure domain will be the combination of the labels. + If `compositeLabels` is set, `label` and `skew` will be ignored. + When using compositeLabels, weka containers will be spread considering all labels + with best effort, but even distribution is not guaranteed + items: + type: string + type: array + label: + description: |- + label used for spreading the weka containers across different failure domains (if set) + nodes that have the same value for the label will be considered as a single failure domain + type: string + skew: + description: |- + skew for the failure domain, if set, the weka containers will be spread with the skew in mind + (only applicable if `label` is set) + type: integer + type: object + group: + type: string + hostPID: + type: boolean + hugepages: + type: integer + hugepagesOffset: + type: integer + hugepagesSize: + type: string + hugepagesSizeOverride: + type: string + image: + type: string + imagePullSecret: + type: string + instructions: + properties: + payload: + type: string + type: + type: string + required: + - type + type: object + ipv6: + type: boolean + joinIpPorts: + items: + type: string + type: array + mode: + enum: + - drive + - compute + - client + - dist + - drivers-dist + - drivers-loader + - drivers-builder + - discovery + - s3 + - adhoc-op-with-container + - adhoc-op + - envoy + - nfs + type: string + name: + type: string + network: + properties: + aws: + properties: + deviceSlots: + description: should provide list of additional nics indexes + starting from 1, index 0 is reserved for kernel networking + items: + type: integer + type: array + 
type: object + deviceSubnets: + description: subnet that is used for devices auto-discovery + items: + pattern: ^([0-9]{1,3}\.){3}[0-9]{1,3}\/[0-9]{1,2}$ + type: string + type: array + ethDevice: + type: string + ethDevices: + items: + type: string + type: array + gateway: + type: string + udpMode: + type: boolean + type: object + nodeAffinity: + description: name of the node where the container should run on + type: string + nodeInfoConfigMap: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + numCores: + type: integer + numDrives: + type: integer + overrides: + properties: + debugSleepOnTerminate: + description: DebugSleepOnTerminate specifies the number of seconds + to sleep on container abnormal exit for debugging purposes + type: integer + forceDrain: + description: unsafe operation, forces drain on the node where + the container is running, should not be used unless instructed + explicitly by weka personnel, the effect of drain is throwing + away all IOs and acknowledging all umounts in unsafe manner + type: boolean + machineIdentifierNodeRef: + type: string + migrateOutFromPvc: + description: MigrateOutFromPvc specifies that the container should + be migrated out from PVC into local storage, this will be done + prior to starting pod + type: boolean + podDeleteForceReplace: + type: boolean + preRunScript: + description: script to be executed post initial persistency(if + needed) configuration, before running actual workload + type: string + skipActiveMountsCheck: + description: option to skip active mounts check before deleting + client containers + type: boolean + skipCleanupPersistentDir: + description: skips cleanup of persistent directory, if this operation + was omit local data of container will remain in persistent location(/opt/k8s-weka + on vanilla OS/k8s distributions) + type: boolean + skipDeactivate: + description: skips deactivation of container, this is unsafe operation + that should be used only when this 
container will never be back + into cluster + type: boolean + skipDrivesForceResign: + description: skips resign of drives, if we did not resign drives + on removal of drive container we will not be able to reuse them, + and manual operation with force resign will be required + type: boolean + umountOnHost: + description: unsafe operation, runs nsenter in root namespace + to umount all wekafs mounts visible on host + type: boolean + upgradeForceReplace: + description: unsafe operation, skips graceful stop of weka container + for a quick replacement to a new image, should not be used unless + instructed explicitly by weka personnel + type: boolean + upgradePreventEviction: + type: boolean + type: object + port: + type: integer + portRange: + properties: + basePort: + default: 45000 + maximum: 65535 + type: integer + portRange: + default: 0 + description: |- + number of ports to check for availability + if 0 - will check all ports from basePort to 65535 + type: integer + required: + - basePort + type: object + pvc: + properties: + name: + type: string + path: + type: string + required: + - name + type: object + resources: + description: resources to be proxied as-is to the pod spec + properties: + limits: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + requests: + properties: + cpu: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + memory: + anyOf: + - type: integer + - type: string + pattern: 
^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + type: object + serviceAccountName: + type: string + state: + default: active + enum: + - active + - paused + - destroying + - deleting + type: string + tolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: controls the distribution of weka containers across the + failure domains + items: + description: TopologySpreadConstraint specifies how to spread matching + pods among the given topology. + properties: + labelSelector: + description: |- + LabelSelector is used to find matching pods. + Pods that match this label selector are counted to determine the number of pods + in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: |- + MatchLabelKeys is a set of pod label keys to select the pods over which + spreading will be calculated. The keys are used to lookup values from the + incoming pod labels, those key-value labels are ANDed with labelSelector + to select the group of existing pods over which spreading will be calculated + for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. + MatchLabelKeys cannot be set when LabelSelector isn't set. + Keys that don't exist in the incoming pod labels will + be ignored. A null or empty list means only match against labelSelector. + + This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: |- + MaxSkew describes the degree to which pods may be unevenly distributed. + When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference + between the number of matching pods in the target topology and the global minimum. + The global minimum is the minimum number of matching pods in an eligible domain + or zero if the number of eligible domains is less than MinDomains. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 2/2/1: + In this case, the global minimum is 1. + | zone1 | zone2 | zone3 | + | P P | P P | P | + - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; + scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) + violate MaxSkew(1). + - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence + to topologies that satisfy it. + It's a required field. Default value is 1 and 0 is not allowed. 
+ format: int32 + type: integer + minDomains: + description: |- + MinDomains indicates a minimum number of eligible domains. + When the number of eligible domains with matching topology keys is less than minDomains, + Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed. + And when the number of eligible domains with matching topology keys equals or greater than minDomains, + this value has no effect on scheduling. + As a result, when the number of eligible domains is less than minDomains, + scheduler won't schedule more than maxSkew Pods to those domains. + If value is nil, the constraint behaves as if MinDomains is equal to 1. + Valid values are integers greater than 0. + When value is not nil, WhenUnsatisfiable must be DoNotSchedule. + + For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same + labelSelector spread as 2/2/2: + | zone1 | zone2 | zone3 | + | P P | P P | P P | + The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0. + In this situation, new pod with the same labelSelector cannot be scheduled, + because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, + it will violate MaxSkew. + format: int32 + type: integer + nodeAffinityPolicy: + description: |- + NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector + when calculating pod topology spread skew. Options are: + - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. + - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. + + If this value is nil, the behavior is equivalent to the Honor policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + nodeTaintsPolicy: + description: |- + NodeTaintsPolicy indicates how we will treat node taints when calculating + pod topology spread skew. 
Options are: + - Honor: nodes without taints, along with tainted nodes for which the incoming pod + has a toleration, are included. + - Ignore: node taints are ignored. All nodes are included. + + If this value is nil, the behavior is equivalent to the Ignore policy. + This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. + type: string + topologyKey: + description: |- + TopologyKey is the key of node labels. Nodes that have a label with this key + and identical values are considered to be in the same topology. + We consider each as a "bucket", and try to put balanced number + of pods into each bucket. + We define a domain as a particular instance of a topology. + Also, we define an eligible domain as a domain whose nodes meet the requirements of + nodeAffinityPolicy and nodeTaintsPolicy. + e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. + And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. + It's a required field. + type: string + whenUnsatisfiable: + description: |- + WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + the spread constraint. + - DoNotSchedule (default) tells the scheduler not to schedule it. + - ScheduleAnyway tells the scheduler to schedule the pod in any location, + but giving higher precedence to topologies that would help reduce the + skew. + A constraint is considered "Unsatisfiable" for an incoming pod + if and only if every possible node assignment for that pod would violate + "MaxSkew" on some topology. + For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + labelSelector spread as 3/1/1: + | zone1 | zone2 | zone3 | + | P P P | P | P | + If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + MaxSkew(1). 
In other words, the cluster can still be imbalanced, but scheduler + won't make it *more* imbalanced. + It's a required field. + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + tracesConfiguration: + description: TraceConfiguration defines the configuration for the + traces, accepts parameters in gigabytes + properties: + dumperConfigMode: + default: auto + enum: + - override + - partial-override + - auto + - cluster + type: string + ensureFreeSpace: + default: 20 + type: integer + maxCapacityPerIoNode: + default: 10 + type: integer + type: object + upgradePolicyType: + default: manual + enum: + - manual + - all-at-once + - rolling + - all-at-once-force + type: string + uploadResultsTo: + type: string + wekaSecretRef: + description: EnvVarSource represents a source for the value of an + EnvVar. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the ConfigMap or its key must + be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + properties: + apiVersion: + description: Version of the schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. 
+ type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + properties: + containerName: + description: 'Container name: required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - image + - mode + - name + - numCores + type: object + status: + properties: + allocations: + properties: + agentPort: + type: integer + drives: + items: + type: string + type: array + ethSlots: + items: + type: string + type: array + failureDomain: + description: value of the failure domain label of the node where + the container is running + type: string + lbPort: + type: integer + machineIdentifier: + type: string + netDevices: + items: + type: string + type: array + wekaPort: + type: integer + type: object + clusterID: + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. 
+ Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + containerID: + type: integer + internalStatus: + type: string + lastAppliedImage: + type: string + lastAppliedSpec: + type: string + managementIP: + type: string + managementIPs: + items: + type: string + type: array + nodeAffinity: + description: "NodeName is a type that holds a api.Node's Name identifier.\nBeing + a type captures intent and helps make sure that the node name\nis + not confused with similar concepts (the hostname, the cloud provider + id,\nthe cloud provider name etc)\n\nTo clarify the various types:\n\n + \ - Node.Name is the Name field of the Node in the API. This should + be stored in a NodeName.\n Unfortunately, because Name is part + of ObjectMeta, we can't store it as a NodeName at the API level.\n\n + \ - Hostname is the hostname of the local machine (from uname -n).\n + \ However, some components allow the user to pass in a --hostname-override + flag,\n which will override this in most places. 
In the absence + of anything more meaningful,\n kubelet will use Hostname as the + Node.Name when it creates the Node.\n\n* The cloudproviders have + the own names: GCE has InstanceName, AWS has InstanceId.\n\n\tFor + GCE, InstanceName is the Name of an Instance object in the GCE API. + \ On GCE, Instance.Name becomes the\n\tHostname, and thus it makes + sense also to use it as the Node.Name. But that is GCE specific, + and it is up\n\tto the cloudprovider how to do this mapping.\n\n\tFor + AWS, the InstanceID is not yet suitable for use as a Node.Name, + so we actually use the\n\tPrivateDnsName for the Node.Name. And + this is _not_ always the same as the hostname: if\n\twe are using + a custom DHCP domain it won't be." + type: string + notToleratedOnReschedule: + type: boolean + printer: + properties: + activeMounts: + type: string + drives: + type: string + managementIPs: + description: pretty-printed management IPs + type: string + nodeAffinity: + description: node name where the container is running + type: string + processes: + type: string + type: object + result: + type: string + stats: + properties: + activeMounts: + format: int64 + type: integer + cpuUtilization: + type: string + drives: + properties: + counters: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + failures: + items: + properties: + serialId: + type: string + wekaDriveId: + type: string + type: object + type: array + type: object + lastUpdate: + format: date-time + type: string + processes: + properties: + active: + format: int64 + type: integer + created: + format: int64 + type: integer + desired: + format: int64 + type: integer + type: object + type: object + status: + type: string + timestamps: + additionalProperties: + format: date-time + type: string + type: object + required: + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} 
diff --git a/weka-operator/weka-operator/crds/weka.weka.io_wekamanualoperations.yaml b/weka-operator/weka-operator/crds/weka.weka.io_wekamanualoperations.yaml new file mode 100644 index 00000000..af49b583 --- /dev/null +++ b/weka-operator/weka-operator/crds/weka.weka.io_wekamanualoperations.yaml @@ -0,0 +1,299 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: wekamanualoperations.weka.weka.io +spec: + group: weka.weka.io + names: + kind: WekaManualOperation + listKind: WekaManualOperationList + plural: wekamanualoperations + singular: wekamanualoperation + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Action + jsonPath: .spec.action + name: Action + type: string + - description: Status + jsonPath: .status.status + name: Status + type: string + - description: Result + jsonPath: .status.result + name: Result + priority: 1 + type: string + - description: Time since creation + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: WekaManualOperation is the Schema for the wekamanualoperations + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WekaManualOperationSpec defines the desired state of WekaManualOperation + properties: + action: + enum: + - sign-drives + - discover-drives + - force-resign-drives + - block-drives + - unblock-drives + - ensure-nics + - remote-traces-session + type: string + image: + type: string + imagePullSecret: + type: string + payload: + properties: + blockDrivesPayload: + properties: + node: + type: string + serialIDs: + items: + type: string + type: array + required: + - node + - serialIDs + type: object + discoverDrivesPayload: + properties: + nodeSelector: + additionalProperties: + type: string + type: object + type: object + ensureNICsPayload: + properties: + dataNICsNumber: + type: integer + nodeSelector: + additionalProperties: + type: string + type: object + type: + type: string + required: + - type + type: object + forceResignDrivesPayload: + properties: + devicePaths: + items: + type: string + type: array + deviceSerials: + items: + type: string + type: array + nodeName: + description: "NodeName is a type that holds a api.Node's Name + identifier.\nBeing a type captures intent and helps make + sure that the node name\nis not confused with similar concepts + (the hostname, the cloud provider id,\nthe cloud provider + name etc)\n\nTo clarify the various types:\n\n - Node.Name + is the Name field of the Node in the API. This should be + stored in a NodeName.\n Unfortunately, because Name is + part of ObjectMeta, we can't store it as a NodeName at the + API level.\n\n - Hostname is the hostname of the local + machine (from uname -n).\n However, some components allow + the user to pass in a --hostname-override flag,\n which + will override this in most places. 
In the absence of anything + more meaningful,\n kubelet will use Hostname as the Node.Name + when it creates the Node.\n\n* The cloudproviders have the + own names: GCE has InstanceName, AWS has InstanceId.\n\n\tFor + GCE, InstanceName is the Name of an Instance object in the + GCE API. On GCE, Instance.Name becomes the\n\tHostname, + and thus it makes sense also to use it as the Node.Name. + \ But that is GCE specific, and it is up\n\tto the cloudprovider + how to do this mapping.\n\n\tFor AWS, the InstanceID is + not yet suitable for use as a Node.Name, so we actually + use the\n\tPrivateDnsName for the Node.Name. And this is + _not_ always the same as the hostname: if\n\twe are using + a custom DHCP domain it won't be." + type: string + required: + - nodeName + type: object + remoteTracesSessionPayload: + properties: + allowHttpWekahomeEndpoint: + type: boolean + allowInsecureWekahomeEndpoint: + type: boolean + cluster: + properties: + name: + type: string + namespace: + type: string + required: + - name + - namespace + type: object + duration: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + wekahomeCaSecret: + type: string + wekahomeEndpointOverride: + type: string + type: object + signDrivesPayload: + properties: + devicePaths: + items: + type: string + type: array + nodeSelector: + additionalProperties: + type: string + type: object + options: + properties: + allowEraseNonWekaPartitions: + type: boolean + allowEraseWekaPartitions: + type: boolean + allowNonEmptyDevice: + type: boolean + skipTrimFormat: + type: boolean + type: object + pciDevices: + description: |- + PCI vendor and device IDs of the drives to sign. + To get the values for VendorId and DeviceId: + 1. Run the following command to list all PCI devices on your system: + ```bash + lspci -nn + ``` + 2. Find the relevant PCI device in the output, which will display both the + vendor and device IDs in square brackets in the format [vendorId:deviceId]. 
+ For example: + ``` + 00:1f.0 Non-Volatile memory controller [0108]: Amazon.com, Inc. NVMe SSD Controller [1d0f:cd01] + ``` + properties: + deviceId: + description: |- + DeviceId is the 4-digit hexadecimal device ID + default for AWS: `cd01` (NVMe SSD) + pattern: ^[0-9a-fA-F]{4}$ + type: string + vendorId: + description: |- + VendorId is the 4-digit hexadecimal vendor ID + default for AWS: `1d0f` (Amazon.com, Inc.) + pattern: ^[0-9a-fA-F]{4}$ + type: string + required: + - deviceId + - vendorId + type: object + type: + enum: + - aws-all + - device-identifiers + - device-paths + - all-not-root + type: string + required: + - type + type: object + type: object + tolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. 
+ format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. + If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + required: + - action + - payload + type: object + status: + description: WekaManualOperationStatus defines the observed state of WekaManualOperation + properties: + completedAt: + format: date-time + type: string + result: + type: string + status: + type: string + required: + - completedAt + - result + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/weka-operator/weka-operator/crds/weka.weka.io_wekapolicies.yaml b/weka-operator/weka-operator/crds/weka.weka.io_wekapolicies.yaml new file mode 100644 index 00000000..fb3e8bc1 --- /dev/null +++ b/weka-operator/weka-operator/crds/weka.weka.io_wekapolicies.yaml @@ -0,0 +1,287 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.16.5 + name: wekapolicies.weka.weka.io +spec: + group: weka.weka.io + names: + kind: WekaPolicy + listKind: WekaPolicyList + plural: wekapolicies + singular: wekapolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Type + jsonPath: .spec.type + name: Type + type: string + - description: Status + jsonPath: .status.status + name: Status + type: string + - description: Policy-specific progress + jsonPath: .status.progress + name: Progress + type: string + - description: Last result + jsonPath: .status.result + name: Result + priority: 1 + type: string + - description: Interval + jsonPath: .spec.payload.interval + name: Interval + priority: 1 + type: string + - description: Last run time + jsonPath: .status.lastRunTime + name: Last Run Time + priority: 1 + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: WekaPolicy is the Schema for the wekapolicies 
API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: WekaPolicySpec defines the desired state of WekaPolicy + properties: + image: + type: string + imagePullSecret: + type: string + payload: + properties: + discoverDrivesPayload: + properties: + nodeSelector: + additionalProperties: + type: string + type: object + type: object + driverDistPayload: + description: DriverDistPayload defines the parameters for the + enable-local-drivers-distribution policy + properties: + architectureLabelKey: + description: |- + ArchitectureLabelKey is the custom label key to use for storing the node's architecture. + If not specified, "weka.io/architecture" will be used. + type: string + builderPreRunScript: + description: BuilderPreRunScript is an optional script to + run on builder containers after kernel validation. + type: string + distNodeSelector: + additionalProperties: + type: string + description: |- + DistNodeSelector is the node selector for the drivers distribution (dist) container. + If not specified, the dist container will be scheduled on any available node. + type: object + ensureImages: + description: EnsureImages is a list of Weka images for which + drivers should be proactively built. 
+ items: + type: string + type: array + kernelLabelKey: + description: |- + KernelLabelKey is the custom label key to use for storing the node's kernel version. + If not specified, "weka.io/kernel" will be used. + type: string + nodeSelectors: + description: |- + NodeSelectors is a list of node selectors. Nodes matching any of these selectors will be considered for driver building. + If empty, all nodes in the cluster are considered. + items: + additionalProperties: + type: string + type: object + type: array + type: object + ensureNICsPayload: + properties: + dataNICsNumber: + type: integer + nodeSelector: + additionalProperties: + type: string + type: object + type: + type: string + required: + - type + type: object + interval: + default: 5m + pattern: ^(0|([0-9]+(\.[0-9]+)?(s|m|h))+)$ + type: string + schedulingConfigPayload: + type: object + signDrivesPayload: + properties: + devicePaths: + items: + type: string + type: array + nodeSelector: + additionalProperties: + type: string + type: object + options: + properties: + allowEraseNonWekaPartitions: + type: boolean + allowEraseWekaPartitions: + type: boolean + allowNonEmptyDevice: + type: boolean + skipTrimFormat: + type: boolean + type: object + pciDevices: + description: |- + PCI vendor and device IDs of the drives to sign. + To get the values for VendorId and DeviceId: + 1. Run the following command to list all PCI devices on your system: + ```bash + lspci -nn + ``` + 2. Find the relevant PCI device in the output, which will display both the + vendor and device IDs in square brackets in the format [vendorId:deviceId]. + For example: + ``` + 00:1f.0 Non-Volatile memory controller [0108]: Amazon.com, Inc. 
NVMe SSD Controller [1d0f:cd01] + ``` + properties: + deviceId: + description: |- + DeviceId is the 4-digit hexadecimal device ID + default for AWS: `cd01` (NVMe SSD) + pattern: ^[0-9a-fA-F]{4}$ + type: string + vendorId: + description: |- + VendorId is the 4-digit hexadecimal vendor ID + default for AWS: `1d0f` (Amazon.com, Inc.) + pattern: ^[0-9a-fA-F]{4}$ + type: string + required: + - deviceId + - vendorId + type: object + type: + enum: + - aws-all + - device-identifiers + - device-paths + - all-not-root + type: string + required: + - type + type: object + waitForPolicies: + items: + type: string + type: array + type: object + tolerations: + items: + description: |- + The pod this Toleration is attached to tolerates any taint that matches + the triple using the matching operator . + properties: + effect: + description: |- + Effect indicates the taint effect to match. Empty means match all taint effects. + When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: |- + Key is the taint key that the toleration applies to. Empty means match all taint keys. + If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: |- + Operator represents a key's relationship to the value. + Valid operators are Exists and Equal. Defaults to Equal. + Exists is equivalent to wildcard for value, so that a pod can + tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: |- + TolerationSeconds represents the period of time the toleration (which must be + of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do not evict). Zero and + negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: |- + Value is the taint value the toleration matches to. 
+ If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: + type: string + required: + - payload + - type + type: object + status: + description: WekaPolicyStatus defines the observed state of WekaPolicy + properties: + lastRunTime: + format: date-time + type: string + progress: + type: string + result: + type: string + status: + type: string + typedStatus: + description: TypedPolicyStatus holds status fields specific to a policy + type + properties: + distService: + description: DistServiceStatus holds the status for the enable-local-drivers-distribution + policy + properties: + serviceUrl: + type: string + type: object + type: object + required: + - lastRunTime + - result + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/weka-operator/weka-operator/resources/cos-devenv.sh b/weka-operator/weka-operator/resources/cos-devenv.sh new file mode 100644 index 00000000..cadc6792 --- /dev/null +++ b/weka-operator/weka-operator/resources/cos-devenv.sh @@ -0,0 +1,671 @@ +#!/bin/bash +# Create kernel development environment for COS + +set -o errexit +set -o pipefail + +ROOT_MOUNT_DIR="${ROOT_MOUNT_DIR:-/root}" +RETRY_COUNT=${RETRY_COUNT:-5} + +readonly COS_CI_DOWNLOAD_GCS="gs://cos-infra-prod-artifacts-official" +readonly TOOLCHAIN_URL_FILENAME="toolchain_path" +readonly KERNEL_HEADERS="kernel-headers.tgz" +readonly KERNEL_HEADERS_DIR="kernel-headers" +readonly TOOLCHAIN_ARCHIVE_GCS="toolchain.tar.xz.gcs" +readonly TOOLCHAIN_ENV_FILENAME="toolchain_env" +ROOT_OS_RELEASE="${ROOT_MOUNT_DIR}/etc/os-release" +readonly RETCODE_ERROR=1 +RELEASE_ID="" +# +# Individual build directory, contains kernel headers for the specific build and +# a symlink 'toolchain' that points to the toolchain used for this particular +# build +# +BUILD_DIR="" + +KERNEL_CONFIGS="defconfig" +BUILD_DEBUG_PACKAGE="false" +BUILD_HEADERS_PACKAGE="false" 
+CLEAN_BEFORE_BUILD="false" + +BOARD="" +BUILD_ID="" + +# official release, CI build, or cross-toolchain +MODE="" + +CROS_TC_VERSION="2021.06.26.094653" +# Chromium OS toolchain bucket +CROS_TC_DOWNLOAD_GCS="gs://chromiumos-sdk/" +# COS toolchain bucket +COS_TC_DOWNLOAD_GCS="gs://cos-sdk/" + +# Can be overridden by the command-line argument +TOOLCHAIN_ARCH="x86_64" +KERNEL_ARCH="x86_64" + +# CC and CXX will be set by set_compilation_env +CC="" +CXX="" + +# Use out-of-tree build for full kernel build +KBUILD_OUTPUT="." + +_log() { + local -r prefix="$1" + shift + echo "[${prefix}$(date -u "+%Y-%m-%d %H:%M:%S %Z")] ""$*" >&2 +} + +info() { + _log "INFO " "$*" +} + +warn() { + _log "WARNING " "$*" +} + +error() { + _log "ERROR " "$*" +} + +####################################### +# Choose the public GCS bucket of COS to fetch files from +# "cos-tools", "cos-tools-eu" and "cos-tools-asia" +# based on where the VM is running. +# Arguments: +# None +# Globals: +# COS_DOWNLOAD_GCS +####################################### +get_cos_tools_bucket() { + # Get the zone the VM is running in. + # Example output: projects/438692578867/zones/us-west2-a + # If not running on GCE, use "cos-tools" by default. 
+ metadata_zone="$(curl -s -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/zone)" || { + readonly COS_DOWNLOAD_GCS="gs://cos-tools" + return + } + zone="$( echo $metadata_zone | rev | cut -d '/' -f 1 | rev )" + prefix="$( echo $zone | cut -d '-' -f 1 )" + case $prefix in + "us" | "northamerica" | "southamerica") + readonly COS_DOWNLOAD_GCS="gs://cos-tools" + ;; + "europe") + readonly COS_DOWNLOAD_GCS="gs://cos-tools-eu" + ;; + "asia" | "australia") + readonly COS_DOWNLOAD_GCS="gs://cos-tools-asia" + ;; + *) + readonly COS_DOWNLOAD_GCS="gs://cos-tools" + ;; + esac +} + +download_from_url() { + local -r url="$1" + local -r output="$2" + info "Downloading from URL: ${url}" + info "Local download location: ${output}" + local attempts=0 + until curl --http1.1 -sfS "${url}" -o "${output}"; do + attempts=$(( attempts + 1)) + if (( "${attempts}" >= "${RETRY_COUNT}" )); then + error "Could not download from ${url}" + return ${RETCODE_ERROR} + fi + warn "Error downloading from ${url}, retrying" + sleep 1 + done + info "Download finished" +} + +download_from_gcs() { + local -r url="$1" + local -r output="$2" + info "Downloading from Google Storage: ${url}" + info "Local download location: ${output}" + local attempts=0 + until gsutil -q cp "${url}" "${output}"; do + attempts=$(( attempts + 1)) + if (( "${attempts}" >= "${RETRY_COUNT}" )); then + error "Could not download from ${url}" + return ${RETCODE_ERROR} + fi + warn "Error downloading from ${url}, retrying" + sleep 1 + done + info "Download finished" +} + +# Get the toolchain description in the form of $toolchain-$version +# For CrOS toolchain it's just a basename without extension but for +# COS toolchain version needs to be extarcted from the GCS bucket path +get_toolchain_pkg_name() { + local -r download_url=$1 + case "${download_url}" in + *//cos-sdk/*) + local -r toolchain="$(basename -s .tar.xz "${download_url}")" + local -r path="$(echo "${download_url}" | sed 's@\w\+://cos-sdk/@@')" + local 
-r version="$(echo "${path}" | awk -F / '{print $1 "-" $2}')" + echo "${toolchain}-${version}" + ;; + *//chromiumos-sdk/*) + echo "$(basename -s .tar.xz "${download_url}")" + ;; + *) + error "Unknown toolchain source: ${download_url}" + exit ${RETCODE_ERROR} + ;; + esac +} + +install_cross_toolchain_pkg() { + local -r download_url=$1 + local -r tmpdownload="$(mktemp -d)" + local -r archive_name="$(basename "${download_url}")" + local -r pkg_name="$(get_toolchain_pkg_name "${download_url}")" + local -r toolchain_dir="/build/toolchains/${pkg_name}" + if [[ ! -d "${toolchain_dir}" ]]; then + info "Downloading prebuilt toolchain from ${download_url}" + download_from_gcs "${download_url}" "${tmpdownload}/${archive_name}" + # Don't unpack Rust toolchain elements because they are not needed and they + # use a lot of disk space. + mkdir -p "${toolchain_dir}" + info "Unpacking toolchain to ${toolchain_dir}" + tar axf "${tmpdownload}/${archive_name}" -C "${toolchain_dir}" \ + --exclude='./usr/lib64/rustlib*' \ + --exclude='./usr/lib64/libstd-*.so' \ + --exclude='./lib/libstd-*.so' \ + --exclude='./lib/librustc*' \ + --exclude='./usr/lib64/librustc*' + rm -rf "${tmpdownload}" + info "Toolchain installed" + else + info "Toolchain is already cached" + fi + + if [[ ! -L "${BUILD_DIR}/toolchain" ]]; then + ln -s "${toolchain_dir}" "${BUILD_DIR}/toolchain" + fi + + # keep toolchain source information + echo -n "${download_url}" > "${BUILD_DIR}/toolchain_url" +} + +install_release_cross_toolchain() { + info "Downloading and installing a toolchain" + # Get toolchain_env path from COS GCS bucket + local -r tc_env_file_path="${COS_DOWNLOAD_GCS}/${RELEASE_ID}/${TOOLCHAIN_ENV_FILENAME}" + info "Obtaining toolchain_env file from ${tc_env_file_path}" + + # Download toolchain_env if present + if ! 
download_from_gcs "${tc_env_file_path}" "${BUILD_DIR}/${TOOLCHAIN_ENV_FILENAME}"; then + error "Failed to download toolchain file" + error "Make sure build id '$RELEASE_ID' is valid" + return ${RETCODE_ERROR} + fi + + # Download .gcs file with the original location of the toolchain + # we need the version to put it in a cacheable location + local -r tc_gcs_download_url="${COS_DOWNLOAD_GCS}/${RELEASE_ID}/${TOOLCHAIN_ARCHIVE_GCS}" + if ! download_from_gcs "${tc_gcs_download_url}" "${BUILD_DIR}/${TOOLCHAIN_ARCHIVE_GCS}"; then + error "Failed to download toolchain .gcs file" + error "Make sure build id '$RELEASE_ID' is valid" + return ${RETCODE_ERROR} + fi + + local -r bucket=$(cat "${BUILD_DIR}/${TOOLCHAIN_ARCHIVE_GCS}" | grep ^bucket: | cut -d ' ' -f 2) + local -r path=$(cat "${BUILD_DIR}/${TOOLCHAIN_ARCHIVE_GCS}" | grep ^path: | cut -d ' ' -f 2) + local -r tc_download_url="gs://$bucket/$path" + + # Install toolchain pkg + install_cross_toolchain_pkg "${tc_download_url}" +} + +install_release_kernel_headers() { + info "Downloading and installing a kernel headers" + local -r kernel_headers_file_path="${COS_DOWNLOAD_GCS}/${RELEASE_ID}/${KERNEL_HEADERS}" + info "Obtaining kernel headers file from ${kernel_headers_file_path}" + + if ! 
download_from_gcs "${kernel_headers_file_path}" "${BUILD_DIR}/${KERNEL_HEADERS}"; then + return ${RETCODE_ERROR} + fi + mkdir -p "${BUILD_DIR}/${KERNEL_HEADERS_DIR}" + tar axf "${BUILD_DIR}/${KERNEL_HEADERS}" -C "${BUILD_DIR}/${KERNEL_HEADERS_DIR}" + rm -f "${BUILD_DIR}/${KERNEL_HEADERS}" +} + +# Download and install toolchain from the CI or tryjob build directory +install_build_cross_toolchain() { + local -r bucket="$1" + + info "Downloading and installing a toolchain" + # Get toolchain_env path from COS GCS bucket + local -r tc_env_file_path="${bucket}/${TOOLCHAIN_ENV_FILENAME}" + local -r tc_url_file_path="${bucket}/${TOOLCHAIN_URL_FILENAME}" + + info "Obtaining toolchain_env file from ${tc_env_file_path}" + + # Download toolchain_env if present + if ! download_from_gcs "${tc_env_file_path}" "${BUILD_DIR}/${TOOLCHAIN_ENV_FILENAME}"; then + error "Failed to download toolchain file" + error "Make sure build id '$RELEASE_ID' is valid" + return ${RETCODE_ERROR} + fi + + # Download toolchain_path if present + if ! download_from_gcs "${tc_url_file_path}" "${BUILD_DIR}/${TOOLCHAIN_URL_FILENAME}"; then + error "Failed to download toolchain file" + error "Make sure build id '$RELEASE_ID' is valid" + return ${RETCODE_ERROR} + fi + + local -r tc_path="$(cat ${BUILD_DIR}/${TOOLCHAIN_URL_FILENAME})" + local tc_download_url="${COS_TC_DOWNLOAD_GCS}${tc_path}" + if ! gsutil -q stat "${tc_download_url}"; then + tc_download_url="${CROS_TC_DOWNLOAD_GCS}${tc_path}" + fi + + if ! 
gsutil -q stat "${tc_download_url}"; then + error "Toolchain path '${tc_path}' does not exist in either COS or CrOS GCS buckets" + return ${RETCODE_ERROR} + fi + + # Install toolchain pkg + install_cross_toolchain_pkg "${tc_download_url}" +} + +# Download and install kernel headers from the CI or tryjob build directory +install_build_kernel_headers() { + local -r bucket="$1" + + info "Downloading and installing a kernel headers" + local -r kernel_headers_file_path="${bucket}/${KERNEL_HEADERS}" + info "Obtaining kernel headers file from ${kernel_headers_file_path}" + + if ! download_from_gcs "${kernel_headers_file_path}" "${BUILD_DIR}/${KERNEL_HEADERS}"; then + return ${RETCODE_ERROR} + fi + mkdir -p "${BUILD_DIR}/${KERNEL_HEADERS_DIR}" + tar axf "${BUILD_DIR}/${KERNEL_HEADERS}" -C "${BUILD_DIR}/${KERNEL_HEADERS_DIR}" + rm -f "${BUILD_DIR}/${KERNEL_HEADERS}" +} + +install_generic_cross_toolchain() { + info "Downloading and installing a toolchain" + # Download toolchain_env if present + local -r tc_date="$(echo ${CROS_TC_VERSION} | sed -E 's/\.(..).*/\/\1/')" + local -r tc_download_url="${CROS_TC_DOWNLOAD_GCS}${tc_date}/${TOOLCHAIN_ARCH}-cros-linux-gnu-${CROS_TC_VERSION}.tar.xz" + + # Install toolchain pkg + install_cross_toolchain_pkg "${tc_download_url}" +} + +set_compilation_env() { + local -r tc_env_file_path="${COS_DOWNLOAD_GCS}/${RELEASE_ID}/${TOOLCHAIN_ENV_FILENAME}" + # toolchain_env file will set 'CC' and 'CXX' environment + # variable based on the toolchain used for kernel compilation + if [[ -f "${BUILD_DIR}/${TOOLCHAIN_ENV_FILENAME}" ]]; then + source "${BUILD_DIR}/${TOOLCHAIN_ENV_FILENAME}" + export CC + export CXX + else + export CC="${TOOLCHAIN_ARCH}-cros-linux-gnu-clang" + export CXX="${TOOLCHAIN_ARCH}-cros-linux-gnu-clang++" + fi + info "Configuring environment variables for cross-compilation" + # CC and CXX are already set in toolchain_env + TOOLCHAIN_DIR="${BUILD_DIR}/toolchain" + export PATH="${TOOLCHAIN_DIR}/bin:${TOOLCHAIN_DIR}/usr/bin:${PATH}" 
+ export SYSROOT="${TOOLCHAIN_DIR}/usr/${TOOLCHAIN_ARCH}-cros-linux-gnu" + export HOSTCC="x86_64-pc-linux-gnu-clang" + export HOSTCXX="x86_64-pc-linux-gnu-clang++" + export LD="${TOOLCHAIN_ARCH}-cros-linux-gnu-ld.lld" + export HOSTLD="x86_64-pc-linux-gnu-ld.lld" + export OBJCOPY=llvm-objcopy + export STRIP=llvm-strip + export KERNEL_ARCH + export TOOLCHAIN_ARCH + export LLVM_IAS=1 + if [[ "${MODE}" = "release" || "${MODE}" = "build" || "${MODE}" = "custom" ]]; then + local -r headers_dir=$(ls -d ${BUILD_DIR}/${KERNEL_HEADERS_DIR}/usr/src/linux-headers*) + export KHEADERS="${headers_dir}" + fi +} + +kmake() { + local output_dir_arg="KBUILD_OUTPUT=" + if [[ "${KBUILD_OUTPUT}" != "." ]]; then + output_dir_arg="KBUILD_OUTPUT=${KBUILD_OUTPUT}" + fi + env ARCH=${KERNEL_ARCH} make ARCH=${KERNEL_ARCH} \ + CC="${CC}" CXX="${CXX}" LD="${LD}" \ + STRIP="${STRIP}" OBJCOPY="${OBJCOPY}" \ + HOSTCC="${HOSTCC}" HOSTCXX="${HOSTCXX}" HOSTLD="${HOSTLD}" \ + "${output_dir_arg}" \ + "$@" +} +export -f kmake + +gpu_build() { + if [[ ${KERNEL_ARCH} != "x86_64" ]]; then + echo "GPU driver builds only tested for x86. + Current architecture detected: ${KERNEL_ARCH}" + exit 1 + fi + make -C "/src/${GPU_DIR}" modules VERBOSE=1 V=1 \ + SYSSRC="/src/" \ + TARGET_ARCH=${KERNEL_ARCH} \ + CC="x86_64-cros-linux-gnu-clang" \ + LD="x86_64-cros-linux-gnu-ld.bfd" \ + AR="x86_64-cros-linux-gnu-ar" \ + CXX="x86_64-cros-linux-gnu-gcc" \ + OBJCOPY="x86_64-cros-linux-gnu-objcopy" \ + OBJDUMP="x86_64-cros-linux-gnu-objdump" \ + NV_VERBOSE=1 IGNORE_CC_MISMATCH=yes \ + "$@" +} + +tar_kernel_headers() { + local -r version=$(kmake "$@" -s kernelrelease) + local -r tmpdir="$(mktemp -d)" + local arch_dir + case "${KERNEL_ARCH}" in + x86_64) arch_dir="x86" ;; + arm64) arch_dir="arm64" ;; + *) + echo "Unknown kernel architecture: ${KERNEL_ARCH}" + exit $RETCODE_ERROR + ;; + esac + + ( + find . 
-name Makefile\* -o -name Kconfig\* -o -name \*.pl + find arch/*/include include scripts -type f -o -type l + find "arch/${arch_dir}" -name module.lds -o -name Kbuild.platforms -o -name Platform + find "arch/${arch_dir}" -name include -o -name scripts -type d | while IFS='' read -r line; do + find "${line}" -type f + done + ) > "${tmpdir}/hdrsrcfiles" + + pushd "${KBUILD_OUTPUT}" + ( + if [[ -d tools/objtool ]]; then + find tools/objtool -type f -executable + fi + find "arch/${arch_dir}/include" Module.symvers System.map \ + include scripts .config \ + -type f ! -name "*.cmd" ! -name "*.o" + ) > "${tmpdir}/hdrobjfiles" + popd + + local -r destdir="${tmpdir}/headers_tmp/usr/src/linux-headers-${version}" + mkdir -p "${destdir}" + mkdir -p "${destdir}/build" + tar -c -f - -T "${tmpdir}/hdrsrcfiles" | tar -xf - -C "${destdir}" + # separate generated files and main sources for now + # this is to prevent breakage in linux-info.eclass that + # rely on src and build being separated + tar -c -f - -C ${KBUILD_OUTPUT} -T "${tmpdir}/hdrobjfiles" | tar -xf - -C "${destdir}/build" + echo "include ../Makefile" > "${destdir}/build/Makefile" + + rm "${tmpdir}/hdrsrcfiles" "${tmpdir}/hdrobjfiles" + + tar -C "${tmpdir}/headers_tmp" -c -z -f "cos-kernel-headers-${version}-${KERNEL_ARCH}.tgz" . 
+ rm -rf "${tmpdir}" +} + +kernel_build() { + local -r tmproot_dir="$(mktemp -d)" + local image_target + + case "${KERNEL_ARCH}" in + x86_64) image_target="bzImage" ;; + arm64) image_target="Image" ;; + *) + echo "Unknown kernel architecture: ${KERNEL_ARCH}" + exit $RETCODE_ERROR + ;; + esac + + if [[ "${CLEAN_BEFORE_BUILD}" = "true" ]]; then + kmake "$@" mrproper + fi + kmake "$@" "${KERNEL_CONFIGS[@]}" + kmake "$@" "${image_target}" modules + # kernelrelease should be evaluated after the build + # otherwise CONFIG_LOCALVERSION value is not picked up properly + local -r version=$(kmake "$@" -s kernelrelease) + INSTALL_MOD_PATH="${tmproot_dir}" kmake "$@" modules_install + + mkdir -p "${tmproot_dir}/boot/" + cp -v -- "${KBUILD_OUTPUT}/.config" "${tmproot_dir}/boot/config-${version}" + cp -v -- "${KBUILD_OUTPUT}/arch/${KERNEL_ARCH}/boot/${image_target}" "${tmproot_dir}/boot/vmlinuz-${version}" + + for module in $(find "$tmproot_dir/lib/modules/" -name "*.ko" -printf '%P\n'); do + module="lib/modules/$module" + mkdir -p "$(dirname "$tmproot_dir/usr/lib/debug/$module")" + # only keep debug symbols in the debug file + $OBJCOPY --only-keep-debug "$tmproot_dir/$module" "$tmproot_dir/usr/lib/debug/$module" + # strip original module from debug symbols + $OBJCOPY --strip-debug "$tmproot_dir/$module" + # then add a link to those + $OBJCOPY --add-gnu-debuglink="$tmproot_dir/usr/lib/debug/$module" "$tmproot_dir/$module" + done + + if [[ "${BUILD_DEBUG_PACKAGE}" = "true" ]]; then + cp -v -- "${KBUILD_OUTPUT}/vmlinux" "${tmproot_dir}/usr/lib/debug/lib/modules/${version}/" + # Some other tools expect other locations + mkdir -p "$tmproot_dir/usr/lib/debug/boot/" + ln -s "../lib/modules/$version/vmlinux" "$tmproot_dir/usr/lib/debug/boot/vmlinux-$version" + ln -s "lib/modules/$version/vmlinux" "$tmproot_dir/usr/lib/debug/vmlinux-$version" + tar -c -J -f "cos-kernel-debug-${version}-${KERNEL_ARCH}.txz" -C "${tmproot_dir}/usr/lib" debug/ + fi + + tar -c -J -f 
"cos-kernel-${version}-${KERNEL_ARCH}.txz" -C "${tmproot_dir}" boot/ lib/ + rm -rf "${tmproot_dir}" + + if [[ "${BUILD_HEADERS_PACKAGE}" = "true" ]]; then + tar_kernel_headers + fi + + # pass env information + echo "CC=${CC}" > "${KBUILD_OUTPUT}/toolchain_env" + echo "CXX=${CXX}" >> "${KBUILD_OUTPUT}/toolchain_env" + + # pass toolchain source location + if [[ -f "${BUILD_DIR}/toolchain_url" ]]; then + cp "${BUILD_DIR}/toolchain_url" "${KBUILD_OUTPUT}/toolchain_url"; + fi +} + +module_build() { + if [[ "${CLEAN_BEFORE_BUILD}" = "true" ]]; then + kmake -C "${KHEADERS}" M="$(pwd)" "$@" clean + fi + kmake -C "${KHEADERS}" M="$(pwd)" "$@" modules +} + +usage() { +cat 1>&2 <<__EOUSAGE__ +Usage: $0 [-k | -m | -i] [-cdH] [-A ] + [-C [,fragment1.config,...]] [-O ] + [-B -b | -R | -G ] + [-t ] [VAR=value ...] [target ...] + +Options: + -A target architecture. Valid values are x86_64 and arm64. + -B seed the toolchain from the COS build . + Example: R93-16623.0.0. Instead of the actual + build number developer can specify the branch name + to use the latest build off that branch. + Example: main-R93, release-R89. Requires -b option. + -C kernel configs target. Example: lakitu_defconfig. + It's also possible to specify main config and fragments + separated by coma, i.e.: lakitu_defconfig,google/xfstest.config + -G seed the toolchain and kernel headers from the custom + GCS bucket . Directory structure needs to conform + to the COS standard. + -H create a package with kernel headers for the respective + kernel package. Should be used only with -k option. + -O value for KBUILD_OUTPUT to separate obj files from + sources + -R seed the toolchain and kernel headers from the + specified official COS release. Example: 16442.0.0 + -b specify board for -B argument. Example: lakitu + -c perform "mrproper" step when building a kernel package or + "clean" step when building a module. + Should be used only with -k and -m option. 
+ -d create a package with debug symbols for the respective + kernel package. Should be used only with -k option. + -h show this message. + -i invoke interactive shell with kernel development + environment initialized. + -k build a kernel package for sources mapped from the host + to the current working directory. + -m build an out-of-tree module for sources mapped from + the host to the current working directory. + This mode requires either -R or -B/b options. + -t seed the toolchain from the Chromium OS upstream. + Example: 2021.06.26.094653 + -x build the nvidia gpu modules from the specified source relative + to the kernel source directory. Output nvidia gpu modules + present in the /kernel-open/ dir. + Example: -x nvidia/kernel-module-src, modules generated in: + nvidia/kernel-module-src/kernel-open as nvidia*.ko +__EOUSAGE__ + + exit $RETCODE_ERROR +} + +main() { + local build_target="" + local custom_bucket="" + get_cos_tools_bucket + while getopts "A:B:C:G:HO:R:b:cdhikmtx:" o; do + case "${o}" in + A) KERNEL_ARCH=${OPTARG} ;; + B) BUILD_ID=${OPTARG} ;; + C) KERNEL_CONFIGS=(${OPTARG//,/ }) ;; + G) custom_bucket=${OPTARG} ;; + H) BUILD_HEADERS_PACKAGE="true" ;; + O) KBUILD_OUTPUT=${OPTARG} ;; + R) RELEASE_ID=${OPTARG} ;; + b) BOARD=${OPTARG} ;; + c) CLEAN_BEFORE_BUILD="true" ;; + d) BUILD_DEBUG_PACKAGE="true" ;; + h) usage ;; + i) build_target="shell" ;; + k) build_target="kernel" ;; + m) build_target="module" ;; + t) CROS_TC_VERSION="${OPTARG}" ;; + x) build_target="gpu" + GPU_DIR=${OPTARG} ;; + *) usage ;; + esac + done + shift $((OPTIND-1)) + + if [[ ! 
-z "${BOARD}" ]]; then + case "${BOARD}" in + lakitu-arm64) KERNEL_ARCH=arm64 ;; + *) KERNEL_ARCH=x86_64 ;; + esac + fi + + case "${KERNEL_ARCH}" in + x86_64) + TOOLCHAIN_ARCH=x86_64 + ;; + arm64) + TOOLCHAIN_ARCH=aarch64 + ;; + *) + echo "Invalid -A value: $KERNEL_ARCH" + usage + ;; + esac + + echo "** Kernel architecture: $KERNEL_ARCH" + echo "** Toolchain architecture: $TOOLCHAIN_ARCH" + + if [[ -n "$RELEASE_ID" ]]; then + MODE="release" + BUILD_DIR="/build/${TOOLCHAIN_ARCH}-${RELEASE_ID}" + echo "** COS release: $RELEASE_ID" + fi + + if [[ -n "$BUILD_ID" ]]; then + if ! [[ $BUILD_ID =~ R[0-9]+-[0-9.]+ ]]; then + BRANCH="${BUILD_ID}" + echo "** Obtaining the latest build # for branch ${BRANCH}..." + readonly latest="${COS_CI_DOWNLOAD_GCS}/${BOARD}-release/LATEST-${BUILD_ID}" + BUILD_ID=$(gsutil -q cat "${latest}" || true) + if [[ -n "$BUILD_ID" ]]; then + echo "** Latest build for branch ${BRANCH} is ${BUILD_ID}" + else + echo "** Failed to find latest build for branch ${BRANCH}" + exit 1 + fi + fi + fi + + if [[ -z "$MODE" && -n "$BOARD" && -n "$BUILD_ID" ]]; then + MODE="build" + echo "** COS build: $BOARD-$BUILD_ID" + BUILD_DIR="/build/${BOARD}-${BUILD_ID}" + fi + + if [[ -z "$MODE" && -n "$custom_bucket" ]]; then + MODE="custom" + BUILD_DIR="/build/$(basename "${custom_bucket}")" + fi + + if [[ -z "$MODE" ]]; then + MODE="cross" + BUILD_DIR="/build/cros-${CROS_TC_VERSION}-${TOOLCHAIN_ARCH}" + fi + echo "Mode: $MODE" + + if [[ -n "${BUILD_DIR}" ]]; then + if [[ ! 
-d "${BUILD_DIR}" ]]; then + mkdir -p "${BUILD_DIR}" + case "$MODE" in + cross) + install_generic_cross_toolchain + ;; + release) + install_release_cross_toolchain + install_release_kernel_headers + ;; + build) + local -r bucket="${COS_CI_DOWNLOAD_GCS}/${BOARD}-release/${BUILD_ID}" + install_build_cross_toolchain "${bucket}" + install_build_kernel_headers "${bucket}" + ;; + custom) + install_build_cross_toolchain "${custom_bucket}" + install_build_kernel_headers "${custom_bucket}" + ;; + esac + fi + fi + + set_compilation_env + + case "${build_target}" in + kernel) kernel_build -j"$(nproc)" ;; + module) module_build -j"$(nproc)" ;; + shell) + echo "Starting interactive shell for the kernel devenv" + /bin/bash + ;; + gpu) gpu_build -j"$(nproc)" ;; + *) kmake -j"$(nproc)" "$@" ;; + esac +} + +main "$@" diff --git a/weka-operator/weka-operator/resources/run-weka-cli.sh b/weka-operator/weka-operator/resources/run-weka-cli.sh new file mode 100644 index 00000000..12cda779 --- /dev/null +++ b/weka-operator/weka-operator/resources/run-weka-cli.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -o pipefail +set -e + +if [[ -f /var/run/secrets/weka-operator/operator-user/username ]]; then + export WEKA_USERNAME=`cat /var/run/secrets/weka-operator/operator-user/username` + export WEKA_PASSWORD=`cat /var/run/secrets/weka-operator/operator-user/password` + export WEKA_ORG=`cat /var/run/secrets/weka-operator/operator-user/org` +fi + +# comes either out of pod spec on repeat run or from resources.json on first run +if [[ "$PORT" == "0" ]]; then + if [[ -f /opt/weka/k8s-runtime/vars/port ]]; then + export PORT=`cat /opt/weka/k8s-runtime/vars/port` + export WEKA_PORT=`cat /opt/weka/k8s-runtime/vars/port` + fi +fi + +if [[ "$AGENT_PORT" == "0" ]]; then + if [[ -f /opt/weka/k8s-runtime/vars/agent_port ]]; then + export AGENT_PORT=`cat /opt/weka/k8s-runtime/vars/agent_port` + fi +fi + + +/usr/bin/weka "$@" diff --git a/weka-operator/weka-operator/resources/syslog-ng.conf 
b/weka-operator/weka-operator/resources/syslog-ng.conf new file mode 100644 index 00000000..ce535034 --- /dev/null +++ b/weka-operator/weka-operator/resources/syslog-ng.conf @@ -0,0 +1,60 @@ +@version: 3.35 + +options { + use_dns(no); + dns_cache(no); + keep_hostname(yes); + create_dirs(yes); + ts_format(iso); +}; + +source s_net { + unix-stream("/var/run/syslog-ng/syslog-ng.sock"); + unix-dgram("/run/systemd/journal/dev-log", create_dirs(yes)); +}; + +destination d_stdout { + file("/dev/stdout" + template("$ISODATE $MSGHDR | $MSG\n") + ); +}; + +destination d_syslog { + file("/var/log/syslog" + template("$ISODATE $MSGHDR | $MSG\n") + template_escape(no) + ); +}; + +destination d_error { + file("/var/log/error" + template("$ISODATE $MSGHDR | $MSG\n") + template_escape(no) + ); +}; + +filter f_info { + match(".*(NOTICE|WARN(ING)*|ERR(OR)*|CRIT(ICAL)*|ALERT|EMERG(ENCY)*|FATAL|ASSERT):.*" value("MESSAGE")); +}; + +filter f_error { + match(".*(ERR(OR)*|CRIT(ICAL)*|ALERT|EMERG(ENCY)*|FATAL|ASSERT):.*" value("MESSAGE")); +}; + +log { + source(s_net); + filter(f_info); + destination(d_stdout); +}; + +log { + source(s_net); + destination(d_syslog); +}; + +log { + source(s_net); + filter(f_error); + destination(d_error); +}; + diff --git a/weka-operator/weka-operator/resources/weka_runtime.py b/weka-operator/weka-operator/resources/weka_runtime.py new file mode 100644 index 00000000..04871f0a --- /dev/null +++ b/weka-operator/weka-operator/resources/weka_runtime.py @@ -0,0 +1,2669 @@ +import base64 +import fcntl +import ipaddress +import json +import logging +import os +import re +import socket +import struct +import subprocess +import sys +import threading +import time +import uuid +from dataclasses import dataclass, asdict +from functools import lru_cache, partial +from os import makedirs +from os.path import exists +from textwrap import dedent +from typing import List, Optional, Tuple + +@dataclass +class SignOptions: + allowEraseWekaPartitions: bool = False + 
allowEraseNonWekaPartitions: bool = False + allowNonEmptyDevice: bool = False + skipTrimFormat: bool = False + + +@dataclass +class Disk: + path: str + is_mounted: bool + serial_id: Optional[str] + + +MODE = os.environ.get("MODE") +assert MODE != "" +NUM_CORES = int(os.environ.get("CORES", 0)) +CORE_IDS = os.environ.get("CORE_IDS", "auto") +CPU_POLICY = os.environ.get("CPU_POLICY", "auto") +NAME = os.environ["NAME"] +NETWORK_DEVICE = os.environ.get("NETWORK_DEVICE", "") +SUBNETS = os.environ.get("SUBNETS", "") +PORT = os.environ.get("PORT", "") +AGENT_PORT = os.environ.get("AGENT_PORT", "") +RESOURCES = {} # to be populated at later stage +MEMORY = os.environ.get("MEMORY", "") +JOIN_IPS = os.environ.get("JOIN_IPS", "") +DIST_SERVICE = os.environ.get("DIST_SERVICE") +OS_DISTRO = "" +OS_BUILD_ID = "" +DISCOVERY_SCHEMA = 1 +INSTRUCTIONS = os.environ.get("INSTRUCTIONS", "") +NODE_NAME = os.environ["NODE_NAME"] +POD_ID = os.environ.get("POD_ID", "") +FAILURE_DOMAIN = os.environ.get("FAILURE_DOMAIN", None) +MACHINE_IDENTIFIER = os.environ.get("MACHINE_IDENTIFIER", None) +NET_GATEWAY = os.environ.get("NET_GATEWAY", None) +IS_IPV6 = os.environ.get("IS_IPV6", "false") == "true" +MANAGEMENT_IPS = [] # to be populated at later stage +UDP_MODE = os.environ.get("UDP_MODE", "false") == "true" +DUMPER_CONFIG_MODE = os.environ.get("DUMPER_CONFIG_MODE", "auto") + +KUBERNETES_DISTRO_OPENSHIFT = "openshift" +KUBERNETES_DISTRO_GKE = "gke" +OS_NAME_GOOGLE_COS = "cos" +OS_NAME_REDHAT_COREOS = "rhcos" + +MAX_TRACE_CAPACITY_GB = os.environ.get("MAX_TRACE_CAPACITY_GB", 10) +ENSURE_FREE_SPACE_GB = os.environ.get("ENSURE_FREE_SPACE_GB", 20) + +WEKA_CONTAINER_ID = os.environ.get("WEKA_CONTAINER_ID", "") +WEKA_PERSISTENCE_DIR = "/host-binds/opt-weka" +WEKA_PERSISTENCE_MODE = os.environ.get("WEKA_PERSISTENCE_MODE", "local") +WEKA_PERSISTENCE_GLOBAL_DIR = "/opt/weka-global-persistence" +if WEKA_PERSISTENCE_MODE == "global": + WEKA_PERSISTENCE_DIR = os.path.join(WEKA_PERSISTENCE_GLOBAL_DIR, 
"containers", WEKA_CONTAINER_ID) + +WEKA_COS_ALLOW_HUGEPAGE_CONFIG = True if os.environ.get("WEKA_COS_ALLOW_HUGEPAGE_CONFIG", "false") == "true" else False +WEKA_COS_ALLOW_DISABLE_DRIVER_SIGNING = True if os.environ.get("WEKA_COS_ALLOW_DISABLE_DRIVER_SIGNING", + "false") == "true" else False +WEKA_COS_GLOBAL_HUGEPAGE_SIZE = os.environ.get("WEKA_COS_GLOBAL_HUGEPAGE_SIZE", "2M").lower() +WEKA_COS_GLOBAL_HUGEPAGE_COUNT = int(os.environ.get("WEKA_COS_GLOBAL_HUGEPAGE_COUNT", 4000)) + +AWS_VENDOR_ID = "1d0f" +AWS_DEVICE_ID = "cd01" +AUTO_REMOVE_TIMEOUT = int(os.environ.get("AUTO_REMOVE_TIMEOUT", "0")) + +# for client dynamic port allocation +BASE_PORT = os.environ.get("BASE_PORT", "") +PORT_RANGE = os.environ.get("PORT_RANGE", "0") +WEKA_CONTAINER_PORT_SUBRANGE = 100 +MAX_PORT = 65535 + +# Define global variables +exiting = 0 + +# Formatter with channel name +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + +# Define handlers for stdout and stderr +stdout_handler = logging.StreamHandler(sys.stdout) +stdout_handler.setLevel(logging.DEBUG) +stderr_handler = logging.StreamHandler(sys.stderr) +stderr_handler.setLevel(logging.WARNING) + +# Basic configuration +logging.basicConfig( + level=logging.DEBUG, # Global minimum logging level + format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', # Include timestamp + handlers=[stdout_handler, stderr_handler] +) + + +async def get_serial_id_fallback(device_path: str) -> Optional[str]: + """ + Fallback method to get serial ID for a device using udev data. + This is useful for non-nvme devices where lsblk might not report a serial. 
+ """ + device_name = os.path.basename(device_path) + logging.info(f"Attempting fallback to get serial for {device_name}") + try: + # Get major:minor device number + dev_index_out, _, ec = await run_command(f"cat /sys/block/{device_name}/dev") + if ec != 0: + logging.warning(f"Fallback failed: could not get dev index for {device_name}") + return None + dev_index = dev_index_out.decode().strip() + + # Get serial from udev data + serial_id_cmd = f"cat /host/run/udev/data/b{dev_index} | grep ID_SERIAL=" + serial_id_out, _, ec = await run_command(serial_id_cmd) + if ec != 0: + logging.warning(f"Fallback failed: could not get ID_SERIAL from udev for {device_name}") + return None + + serial_id = serial_id_out.decode().strip().split("=")[-1] + logging.info(f"Fallback successful for {device_name}, found serial: {serial_id}") + return serial_id + except Exception as e: + logging.error(f"Exception during serial ID fallback for {device_name}: {e}") + return None + + +async def sign_drives_by_pci_info(vendor_id: str, device_id: str, options: dict) -> List[str]: + logging.info("Signing drives. Vendor ID: %s, Device ID: %s", vendor_id, device_id) + + if not vendor_id or not device_id: + raise ValueError("Vendor ID and Device ID are required") + + cmd = f"lspci -d {vendor_id}:{device_id}" + " | sort | awk '{print $1}'" + stdout, stderr, ec = await run_command(cmd) + if ec != 0: + return + + signed_drives = [] + pci_devices = stdout.decode().strip().split() + for pci_device in pci_devices: + device = f"/dev/disk/by-path/pci-0000:{pci_device}-nvme-1" + try: + await sign_device_path(device, options) + signed_drives.append(device) + except SignException as e: + logging.error(str(e)) + continue + return signed_drives + + +async def find_disks() -> List[Disk]: + """ + Find all disk devices and check if they or their partitions are mounted. + :return: A list of Disk objects. 
+ """ + logging.info("Finding disks and checking mount status") + # Use -J for JSON output, -p for full paths, -o to specify columns + # TODO: We are dependent on lsblk on the host here. Is it a problem? Potentially. + cmd = "nsenter --mount --pid --target 1 -- lsblk -p -J -o NAME,TYPE,MOUNTPOINT,SERIAL" + stdout, stderr, ec = await run_command(cmd, capture_stdout=True) + if ec != 0: + logging.error(f"Failed to execute lsblk: {stderr.decode()}") + return [] + + try: + data = json.loads(stdout) + except json.JSONDecodeError: + logging.error(f"Failed to parse lsblk JSON output: {stdout.decode()}") + return [] + + disks = [] + + def has_mountpoint(device_info: dict) -> bool: + """Recursively check if a device or any of its children has a mountpoint.""" + if device_info.get("mountpoint"): + return True + if "children" in device_info: + for child in device_info["children"]: + if has_mountpoint(child): + return True + return False + + for device in data.get("blockdevices", []): + if device.get("type") == "disk": + is_mounted = has_mountpoint(device) + serial_id = device.get("serial") + device_path = device["name"] + if not serial_id: + logging.warning(f"lsblk did not return serial for {device_path}. Using fallback.") + serial_id = await get_serial_id_fallback(device_path) + logging.info(f"Found drive: {device_path}, mounted: {is_mounted}, serial: {serial_id}") + disks.append(Disk(path=device_path, is_mounted=is_mounted, serial_id=serial_id)) + + return disks + + +async def sign_not_mounted(options: dict) -> List[str]: + """ + Signs all disk devices that are not mounted and have no mounted partitions. 
+ :param options: + :return: list of signed drive paths + """ + logging.info("Signing drives that are not mounted") + all_disks = await find_disks() + signed_drives = [] + + unmounted_disks = [disk for disk in all_disks if not disk.is_mounted] + logging.info(f"Found {len(unmounted_disks)} unmounted disks to sign: {[d.path for d in unmounted_disks]}") + + for disk in unmounted_disks: + try: + await sign_device_path(disk.path, options) + signed_drives.append(disk.path) + except SignException as e: + logging.error(str(e)) + continue + return signed_drives + + +async def sign_device_paths(devices_paths, options) -> List[str]: + signed_drives = [] + for device_path in devices_paths: + try: + await sign_device_path(device_path, options) + signed_drives.append(device_path) + except SignException as e: + logging.error(str(e)) + continue + return signed_drives + + +class SignException(Exception): + pass + + +async def sign_device_path(device_path, options: SignOptions): + logging.info(f"Signing drive {device_path}") + params = [] + if options.allowEraseWekaPartitions: + params.append("--allow-erase-weka-partitions") + if options.allowEraseNonWekaPartitions: + params.append("--allow-erase-non-weka-partitions") + if options.allowNonEmptyDevice: + params.append("--allow-non-empty-device") + if options.skipTrimFormat: + params.append("--skip-trim-format") + + stdout, stderr, ec = await run_command( + f"/weka-sign-drive {' '.join(params)} -- {device_path}") + if ec != 0: + err = f"Failed to sign drive {device_path}: {stderr}" + raise SignException(err) + + +async def sign_drives(instruction: dict) -> List[str]: + type = instruction['type'] + options = SignOptions(**instruction.get('options', {})) if instruction.get('options') else SignOptions() + + if type == "aws-all": + return await sign_drives_by_pci_info( + vendor_id=AWS_VENDOR_ID, + device_id=AWS_DEVICE_ID, + options=options + ) + elif type == "device-identifiers": + return await sign_drives_by_pci_info( + 
vendor_id=instruction.get('pciDevices', {}).get('vendorId'), + device_id=instruction.get('pciDevices', {}).get('deviceId'), + options=options + ) + elif type == "all-not-root": + return await sign_not_mounted(options) + elif type == "device-paths": + return await sign_device_paths(instruction['devicePaths'], options) + else: + raise ValueError(f"Unknown instruction type: {type}") + + +async def force_resign_drives_by_paths(devices_paths: List[str]): + logging.info("Force resigning drives by paths: %s", devices_paths) + signed_drives = [] + options = SignOptions(allowEraseWekaPartitions=True) + for device_path in devices_paths: + try: + await sign_device_path(device_path, options) + signed_drives.append(device_path) + except SignException as e: + logging.error(str(e)) + continue + write_results(dict( + err=None, + drives=signed_drives + )) + + +async def force_resign_drives_by_serials(serials: List[str]): + logging.info("Force resigning drives by serials: %s", serials) + device_paths = [] + for serial in serials: + device_path = await get_block_device_path_by_serial(serial) + device_paths.append(device_path) + + await force_resign_drives_by_paths(device_paths) + + +async def get_block_device_path_by_serial(serial: str): + logging.info(f"Getting block device path by serial {serial}") + stdout, stderr, ec = await run_command( + "lsblk -dpno NAME | grep -w $(basename $(ls -la /dev/disk/by-id/ | grep -m 1 " + serial + " | awk '{print $NF}'))") + if ec != 0: + logging.error(f"Failed to get block device path by serial {serial}: {stderr}") + return + device_path = stdout.decode().strip() + return device_path + + +async def discover_drives(): + drives = await find_weka_drives() + raw_disks = await find_disks() + write_results(dict( + err=None, + drives=drives, + raw_drives=[asdict(d) for d in raw_disks], + )) + + +async def find_weka_drives(): + drives = [] + # ls /dev/disk/by-path/pci-0000\:03\:00.0-scsi-0\:0\:3\:0 | ssd + + devices_by_id = subprocess.check_output("ls 
/dev/disk/by-id/", shell=True).decode().strip().split() + devices_by_path = subprocess.check_output("ls /dev/disk/by-path/", shell=True).decode().strip().split() + + part_names = [] + + def resolve_to_part_name(): + # TODO: A bit dirty, consolidate paths + for device in devices_by_path: + try: + part_name = subprocess.check_output(f"basename $(readlink -f /dev/disk/by-path/{device})", + shell=True).decode().strip() + except subprocess.CalledProcessError: + logging.error(f"Failed to get part name for {device}") + continue + part_names.append(part_name) + for device in devices_by_id: + try: + part_name = subprocess.check_output(f"basename $(readlink -f /dev/disk/by-id/{device})", + shell=True).decode().strip() + if part_name in part_names: + continue + except subprocess.CalledProcessError: + logging.error(f"Failed to get part name for {device}") + continue + part_names.append(part_name) + + resolve_to_part_name() + + logging.info(f"All found in kernel block devices: {part_names}") + for part_name in part_names: + try: + type_id = subprocess.check_output(f"blkid -s PART_ENTRY_TYPE -o value -p /dev/{part_name}", + shell=True).decode().strip() + except subprocess.CalledProcessError: + logging.error(f"Failed to get PART_ENTRY_TYPE for {part_name}") + continue + + if type_id == "993ec906-b4e2-11e7-a205-a0a8cd3ea1de": + # TODO: Read and populate actual weka guid here + weka_guid = "" + # resolve block_device to serial id + pci_device_path = subprocess.check_output(f"readlink -f /sys/class/block/{part_name}", + shell=True).decode().strip() + if "nvme" in part_name: + # 3 directories up is the serial id + serial_id_path = "/".join(pci_device_path.split("/")[:-2]) + "/serial" + serial_id = subprocess.check_output(f"cat {serial_id_path}", shell=True).decode().strip() + device_path = "/dev/" + pci_device_path.split("/")[-2] + else: + device_name = pci_device_path.split("/")[-2] + device_path = "/dev/" + device_name + dev_index = subprocess.check_output(f"cat 
/sys/block/{device_name}/dev", shell=True).decode().strip() + serial_id_cmd = f"cat /host/run/udev/data/b{dev_index} | grep ID_SERIAL=" + serial_id = subprocess.check_output(serial_id_cmd, shell=True).decode().strip().split("=")[-1] + + drives.append({ + "partition": "/dev/" + part_name, + "block_device": device_path, + "serial_id": serial_id, + "weka_guid": weka_guid + }) + + return drives + + +def is_google_cos(): + return OS_DISTRO == OS_NAME_GOOGLE_COS + + +def is_rhcos(): + return OS_DISTRO == OS_NAME_REDHAT_COREOS + + +def wait_for_syslog(): + while not os.path.isfile('/var/run/syslog-ng.pid'): + time.sleep(0.1) + print("Waiting for syslog-ng to start") + + +def wait_for_agent(): + while not os.path.isfile('/var/run/weka-agent.pid'): + time.sleep(1) + print("Waiting for weka-agent to start") + + +async def ensure_drivers(): + logging.info("waiting for drivers") + drivers = "wekafsio wekafsgw mpin_user".split() + if not is_google_cos(): + drivers.append("igb_uio") + if version_params.get('uio_pci_generic') is not False: + drivers.append("uio_pci_generic") + driver_mode = await is_legacy_driver_cmd() + logging.info(f"validating drivers in mode {MODE}, driver mode: {driver_mode}") + if not await is_legacy_driver_cmd() and MODE in ["client", "s3", + "nfs"]: # we are not using legacy driver on backends, as it should not be validating specific versions, so just lsmoding + while not exiting: + version = await get_weka_version() + stdout, stderr, ec = await run_command(f"weka driver ready --without-agent --version {version}") + if ec != 0: + with open("/tmp/weka-drivers.log_tmp", "w") as f: + f.write("weka-drivers-loading") + logging.warning(f"Drivers are not loaded, waiting for them") + os.rename("/tmp/weka-drivers.log_tmp", "/tmp/weka-drivers.log") + logging.error(f"Failed to validate drivers {stderr.decode('utf-8')}: exc={ec}") + await asyncio.sleep(1) + continue + logging.info("drivers are ready") + break + else: + for driver in drivers: + while True: + stdout, 
stderr, ec = await run_command(f"lsmod | grep -w {driver}") + if ec == 0: + break + # write driver name into /tmp/weka-drivers.log + logging.info(f"Driver {driver} not loaded, waiting for it") + with open("/tmp/weka-drivers.log_tmp", "w") as f: + logging.warning(f"Driver {driver} not loaded, waiting for it") + f.write(driver) + os.rename("/tmp/weka-drivers.log_tmp", "/tmp/weka-drivers.log") + await asyncio.sleep(1) + continue + + with open("/tmp/weka-drivers.log_tmp", "w") as f: + f.write("") + os.rename("/tmp/weka-drivers.log_tmp", "/tmp/weka-drivers.log") + logging.info("All drivers loaded successfully") + + +# This atrocities should be replaced by new weka driver build/publish/download/install functionality +VERSION_TO_DRIVERS_MAP_WEKAFS = { + "4.3.1.29791-9f57657d1fb70e71a3fb914ff7d75eee-dev": dict( + wekafs="cc9937c66eb1d0be-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.2.560-842278e2dca9375f84bd3784a4e7515c-dev3": dict( + wekafs="1acd22f9ddbda67d-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.2.560-842278e2dca9375f84bd3784a4e7515c-dev4": dict( + wekafs="1acd22f9ddbda67d-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.2.560-842278e2dca9375f84bd3784a4e7515c-dev5": dict( + wekafs="1acd22f9ddbda67d-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.2.783-f5fe2ec58286d9fa8fc033f920e6c842-dev": dict( + wekafs="1cb1639d52a2b9ca-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.3.28-k8s-alpha-dev": dict( + wekafs="1cb1639d52a2b9ca-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.3.28-k8s-alpha-dev2": dict( + wekafs="1cb1639d52a2b9ca-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + 
dependencies="6b519d501ea82063", + ), + "4.3.3.28-k8s-alpha-dev3": dict( + wekafs="1cb1639d52a2b9ca-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.2.783-f5fe2ec58286d9fa8fc033f920e6c842-dev2": dict( + wekafs="1cb1639d52a2b9ca-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.3.2.783-f5fe2ec58286d9fa8fc033f920e6c842-dev3": dict( + wekafs="1cb1639d52a2b9ca-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="6b519d501ea82063", + ), + "4.2.7.64-k8so-beta.10": dict( + wekafs="1.0.0-995f26b334137fd78d57c264d5b19852-GW_aedf44a11ca66c7bb599f302ae1dff86", + ), + "4.2.10.1693-251d3172589e79bd4960da8031a9a693-dev": dict( # dev 4.2.7-based version + wekafs="1.0.0-995f26b334137fd78d57c264d5b19852-GW_aedf44a11ca66c7bb599f302ae1dff86", + ), + "4.2.10.1290-e552f99e92504c69126da70e1740f6e4-dev": dict( + wekafs="1.0.0-c50570e208c935e9129c9054140ab11a-GW_aedf44a11ca66c7bb599f302ae1dff86", + ), + "4.2.10-k8so.0": dict( + wekafs="1.0.0-c50570e208c935e9129c9054140ab11a-GW_aedf44a11ca66c7bb599f302ae1dff86", + ), + "4.2.10.1671-363e1e8fcfb1290e061815445e973310-dev": dict( + wekafs="1.0.0-c50570e208c935e9129c9054140ab11a-GW_aedf44a11ca66c7bb599f302ae1dff86", + ), + "4.3.3": dict( + wekafs="cbd05f716a3975f7-GW_556972ab1ad2a29b0db5451e9db18748", + uio_pci_generic=False, + dependencies="7955984e4bce9d8b", + weka_drivers_handling=False, + ), +} +# WEKA_DRIVER_VERSION_OPTIONS = [ +# "1.0.0-c50570e208c935e9129c9054140ab11a-GW_aedf44a11ca66c7bb599f302ae1dff86", +# "1.0.0-995f26b334137fd78d57c264d5b19852-GW_aedf44a11ca66c7bb599f302ae1dff86", +# ] +IGB_UIO_DRIVER_VERSION = "weka1.0.2" +MPIN_USER_DRIVER_VERSION = "1.0.1" +UIO_PCI_GENERIC_DRIVER_VERSION = "5f49bb7dc1b5d192fb01b442b17ddc0451313ea2" +DEFAULT_DEPENDENCY_VERSION = "1.0.0-024f0fdaa33ec66087bc6c5631b85819" + +IMAGE_NAME = os.environ.get("IMAGE_NAME") +DEFAULT_PARAMS = dict( + 
weka_drivers_handling=True, + uio_pci_generic=False, +) +version_params = VERSION_TO_DRIVERS_MAP_WEKAFS.get(os.environ.get("IMAGE_NAME").split(":")[-1], DEFAULT_PARAMS) +if "4.2.7.64-s3multitenancy." in IMAGE_NAME: + version_params = dict( + wekafs="1.0.0-995f26b334137fd78d57c264d5b19852-GW_aedf44a11ca66c7bb599f302ae1dff86", + mpin_user="f8c7f8b24611c2e458103da8de26d545", + igb_uio="b64e22645db30b31b52f012cc75e9ea0", + uio_pci_generic="1.0.0-929f279ce026ddd2e31e281b93b38f52", + ) +assert version_params + +WEKA_DRIVERS_HANDLING = True if version_params.get("weka_drivers_handling") else False + +# Implement the rest of your logic here +import asyncio +import os +import signal + +loop = asyncio.get_event_loop() + + +async def get_weka_version(): + files = os.listdir("/opt/weka/dist/release") + assert len(files) == 1, Exception(f"More then one release found: {files}") + version = files[0].partition(".spec")[0] + return version + + +async def load_drivers(): + def should_skip_uio_pci_generic(): + return version_params.get('uio_pci_generic') is False or should_skip_uio() + + def should_skip_uio(): + return is_google_cos() + + def should_skip_igb_uio(): + return should_skip_uio() + + if is_rhcos(): + if os.path.isdir("/hostpath/lib/modules"): + os.system("cp -r /hostpath/lib/modules/* /lib/modules/") + + if not WEKA_DRIVERS_HANDLING: + # LEGACY MODE + weka_driver_version = version_params.get('wekafs') + download_cmds = [ + (f"mkdir -p /opt/weka/dist/drivers", "creating drivers directory"), + ( + f"curl -kfo /opt/weka/dist/drivers/weka_driver-wekafsgw-{weka_driver_version}-$(uname -r).$(uname -m).ko {DIST_SERVICE}/dist/v1/drivers/weka_driver-wekafsgw-{weka_driver_version}-$(uname -r).$(uname -m).ko", + "downloading wekafsgw driver"), + ( + f"curl -kfo /opt/weka/dist/drivers/weka_driver-wekafsio-{weka_driver_version}-$(uname -r).$(uname -m).ko {DIST_SERVICE}/dist/v1/drivers/weka_driver-wekafsio-{weka_driver_version}-$(uname -r).$(uname -m).ko", + "downloading wekafsio 
driver"), + ( + f"curl -kfo /opt/weka/dist/drivers/mpin_user-{MPIN_USER_DRIVER_VERSION}-$(uname -r).$(uname -m).ko {DIST_SERVICE}/dist/v1/drivers/mpin_user-{MPIN_USER_DRIVER_VERSION}-$(uname -r).$(uname -m).ko", + "downloading mpin_user driver") + ] + if not should_skip_igb_uio(): + download_cmds.append(( + f"curl -kfo /opt/weka/dist/drivers/igb_uio-{IGB_UIO_DRIVER_VERSION}-$(uname -r).$(uname -m).ko {DIST_SERVICE}/dist/v1/drivers/igb_uio-{IGB_UIO_DRIVER_VERSION}-$(uname -r).$(uname -m).ko", + "downloading igb_uio driver")) + if not should_skip_uio_pci_generic(): + download_cmds.append(( + f"curl -kfo /opt/weka/dist/drivers/uio_pci_generic-{UIO_PCI_GENERIC_DRIVER_VERSION}-$(uname -r).$(uname -m).ko {DIST_SERVICE}/dist/v1/drivers/uio_pci_generic-{UIO_PCI_GENERIC_DRIVER_VERSION}-$(uname -r).$(uname -m).ko", + "downloading uio_pci_generic driver")) + + load_cmds = [ + ( + f"lsmod | grep -w wekafsgw || insmod /opt/weka/dist/drivers/weka_driver-wekafsgw-{weka_driver_version}-$(uname -r).$(uname -m).ko", + "loading wekafsgw driver"), + ( + f"lsmod | grep -w wekafsio || insmod /opt/weka/dist/drivers/weka_driver-wekafsio-{weka_driver_version}-$(uname -r).$(uname -m).ko", + "loading wekafsio driver"), + ( + f"lsmod | grep -w mpin_user || insmod /opt/weka/dist/drivers/mpin_user-{MPIN_USER_DRIVER_VERSION}-$(uname -r).$(uname -m).ko", + "loading mpin_user driver") + ] + if not should_skip_uio(): + load_cmds.append((f"lsmod | grep -w uio || modprobe uio", "loading uio driver")) + if not should_skip_igb_uio(): + load_cmds.append(( + f"lsmod | grep -w igb_uio || insmod /opt/weka/dist/drivers/igb_uio-{IGB_UIO_DRIVER_VERSION}-$(uname -r).$(uname -m).ko", + "loading igb_uio driver")) + + else: + # list directory /opt/weka/dist/version + # assert single json file and take json filename + version = await get_weka_version() + download_cmds = [ + (f"weka driver download --from '{DIST_SERVICE}' --without-agent --version {version}", "Downloading drivers") + ] + load_cmds = [ + (f"rmmod 
wekafsio || echo could not unload old wekafsio driver, still trying to proceed", + "unloading wekafsio"), + (f"rmmod wekafsgw || echo could not unload old wekafsgw driver, still trying to proceed", + "unloading wekafsgw"), + (f"weka driver install --without-agent --version {version}", "loading drivers"), + ] + if not should_skip_uio_pci_generic(): + load_cmds.append(( + f"lsmod | grep -w uio_pci_generic || insmod /opt/weka/dist/drivers/uio_pci_generic-{UIO_PCI_GENERIC_DRIVER_VERSION}-$(uname -r).$(uname -m).ko", + "loading uio_pci_generic driver")) + + # load vfio-pci if not loaded and iommu groups are present + cmd = '[ "$(ls -A /sys/kernel/iommu_groups/)" ] && lsmod | grep -w vfio_pci || modprobe vfio-pci' + _, stderr, ec = await run_command(cmd) + if ec != 0: + logging.error(f"Failed to load vfio-pci {stderr.decode('utf-8')}: exc={ec}, last command: {cmd}") + raise Exception(f"Failed to load vfio-pci: {stderr}") + + logging.info("Downloading and loading drivers") + for cmd, desc in download_cmds + load_cmds: + logging.info(f"Driver loading step: {desc}") + stdout, stderr, ec = await run_command(cmd) + if ec != 0: + logging.error(f"Failed to load drivers {stderr.decode('utf-8')}: exc={ec}, last command: {cmd}") + raise Exception(f"Failed to load drivers: {stderr.decode('utf-8')}") + logging.info("All drivers loaded successfully") + + +async def copy_drivers(): + if WEKA_DRIVERS_HANDLING: + return + + weka_driver_version = version_params.get('wekafs') + assert weka_driver_version + + stdout, stderr, ec = await run_command(dedent(f""" + mkdir -p /opt/weka/dist/drivers + cp /opt/weka/data/weka_driver/{weka_driver_version}/$(uname -r)/wekafsio.ko /opt/weka/dist/drivers/weka_driver-wekafsio-{weka_driver_version}-$(uname -r).$(uname -m).ko + cp /opt/weka/data/weka_driver/{weka_driver_version}/$(uname -r)/wekafsgw.ko /opt/weka/dist/drivers/weka_driver-wekafsgw-{weka_driver_version}-$(uname -r).$(uname -m).ko + + cp /opt/weka/data/igb_uio/{IGB_UIO_DRIVER_VERSION}/$(uname 
-r)/igb_uio.ko /opt/weka/dist/drivers/igb_uio-{IGB_UIO_DRIVER_VERSION}-$(uname -r).$(uname -m).ko + cp /opt/weka/data/mpin_user/{MPIN_USER_DRIVER_VERSION}/$(uname -r)/mpin_user.ko /opt/weka/dist/drivers/mpin_user-{MPIN_USER_DRIVER_VERSION}-$(uname -r).$(uname -m).ko + {"" if version_params.get('uio_pci_generic') == False else f"cp /opt/weka/data/uio_generic/{UIO_PCI_GENERIC_DRIVER_VERSION}/$(uname -r)/uio_pci_generic.ko /opt/weka/dist/drivers/uio_pci_generic-{UIO_PCI_GENERIC_DRIVER_VERSION}-$(uname -r).$(uname -m).ko"} + """)) + if ec != 0: + logging.info(f"Failed to copy drivers post build {stderr}: exc={ec}") + raise Exception(f"Failed to copy drivers post build: {stderr}") + logging.info("done copying drivers") + + +async def cos_build_drivers(): + weka_driver_version = version_params["wekafs"] + weka_driver_file_version = weka_driver_version.rsplit("-", 1)[0] + mpin_driver_version = version_params["mpin_user"] + igb_uio_driver_version = version_params["igb_uio"] + uio_pci_generic_driver_version = version_params.get("uio_pci_generic", "1.0.0-929f279ce026ddd2e31e281b93b38f52") + weka_driver_squashfs = f'/opt/weka/dist/image/weka-driver-{weka_driver_file_version}.squashfs' + mpin_driver_squashfs = f'/opt/weka/dist/image/driver-mpin-user-{mpin_driver_version}.squashfs' + igb_uio_driver_squashfs = f'/opt/weka/dist/image/driver-igb-uio-{igb_uio_driver_version}.squashfs' + uio_pci_driver_squashfs = f'/opt/weka/dist/image/driver-uio-pci-generic-{uio_pci_generic_driver_version}.squashfs' + logging.info(f"Building drivers for Google Container-Optimized OS release {OS_BUILD_ID}") + for cmd, desc in [ + (f"apt-get install -y squashfs-tools", "installing squashfs-tools"), + (f"mkdir -p /opt/weka/data/weka_driver/{weka_driver_version}/$(uname -r)", "downloading weka driver"), + (f"mkdir -p /opt/weka/data/mpin_user/{MPIN_USER_DRIVER_VERSION}/$(uname -r)", "downloading mpin driver"), + (f"mkdir -p /opt/weka/data/igb_uio/{IGB_UIO_DRIVER_VERSION}/$(uname -r)", "downloading 
igb_uio driver"), + (f"mkdir -p /opt/weka/data/uio_generic/{UIO_PCI_GENERIC_DRIVER_VERSION}/$(uname -r)", + "downloading uio_pci_generic driver"), + (f"unsquashfs -i -f -d /opt/weka/data/weka_driver/{weka_driver_version}/$(uname -r) {weka_driver_squashfs}", + "extracting weka driver"), + (f"unsquashfs -i -f -d /opt/weka/data/mpin_user/{MPIN_USER_DRIVER_VERSION}/$(uname -r) {mpin_driver_squashfs}", + "extracting mpin driver"), + (f"unsquashfs -i -f -d /opt/weka/data/igb_uio/{IGB_UIO_DRIVER_VERSION}/$(uname -r) {igb_uio_driver_squashfs}", + "extracting igb_uio driver"), + ( + f"unsquashfs -i -f -d /opt/weka/data/uio_generic/{UIO_PCI_GENERIC_DRIVER_VERSION}/$(uname -r) {uio_pci_driver_squashfs}", + "extracting uio_pci_generic driver"), + (f"cd /opt/weka/data/weka_driver/{weka_driver_version}/$(uname -r) && /devenv.sh -R {OS_BUILD_ID} -m ", + "building weka driver"), + (f"cd /opt/weka/data/mpin_user/{MPIN_USER_DRIVER_VERSION}/$(uname -r) && /devenv.sh -R {OS_BUILD_ID} -m", + "building mpin driver"), + (f"cd /opt/weka/data/igb_uio/{IGB_UIO_DRIVER_VERSION}/$(uname -r) && /devenv.sh -R {OS_BUILD_ID} -m", + "building igb_uio driver"), + ( + f"cd /opt/weka/data/uio_generic/{UIO_PCI_GENERIC_DRIVER_VERSION}/$(uname -r) && /devenv.sh -R {OS_BUILD_ID} -m", + "building uio_pci_generic driver"), + ]: + logging.info(f"COS driver building step: {desc}") + stdout, stderr, ec = await run_command(cmd) + if ec != 0: + logging.error(f"Failed to build drivers {stderr}: exc={ec}, last command: {cmd}") + raise Exception(f"Failed to build drivers: {stderr}") + + logging.info("Done building drivers") + + +def parse_cpu_allowed_list(path="/proc/1/status"): + with open(path) as file: + for line in file: + if line.startswith("Cpus_allowed_list"): + return expand_ranges(line.strip().split(":\t")[1]) + return [] + + +def expand_ranges(ranges_str): + ranges = [] + for part in ranges_str.split(','): + if '-' in part: + start, end = map(int, part.split('-')) + ranges.extend(list(range(start, end + 
1))) + else: + ranges.append(int(part)) + return ranges + + +def read_siblings_list(cpu_index): + path = f"/sys/devices/system/cpu/cpu{cpu_index}/topology/thread_siblings_list" + with open(path) as file: + return expand_ranges(file.read().strip()) + + +@dataclass +class HostInfo: + kubernetes_distro = 'k8s' + os = 'unknown' + os_build_id = '' # this is either COS build ID OR OpenShift version tag, e.g. 415.92.202406111137-0 + + def is_rhcos(self): + return self.os == OS_NAME_REDHAT_COREOS + + def is_cos(self): + return self.os == OS_NAME_GOOGLE_COS + + +def get_host_info(): + raw_data = {} + ret = HostInfo() + with open("/hostside/etc/os-release") as file: + for line in file: + try: + k, v = line.strip().split("=") + except ValueError: + continue + if v: + raw_data[k] = v.strip().replace('"', '') + + ret.os = raw_data.get("ID", "") + + if ret.is_rhcos(): + ret.kubernetes_distro = KUBERNETES_DISTRO_OPENSHIFT + ret.os_build_id = raw_data.get("VERSION", "") + + elif ret.is_cos(): + ret.kubernetes_distro = KUBERNETES_DISTRO_GKE + ret.os_build_id = raw_data.get("BUILD_ID", "") + return ret + + +@lru_cache +def find_full_cores(n): + if CORE_IDS != "auto": + return list(CORE_IDS.split(",")) + + selected_siblings = [] + + available_cores = parse_cpu_allowed_list() + zero_siblings = [] if 0 not in available_cores else read_siblings_list(0) + + for cpu_index in available_cores: + if cpu_index in zero_siblings: + continue + + siblings = read_siblings_list(cpu_index) + if all(sibling in available_cores for sibling in siblings): + if any(sibling in selected_siblings for sibling in siblings): + continue + selected_siblings.append(siblings[0]) # Select one sibling (the first for simplicity) + if len(selected_siblings) == n: + break + + if len(selected_siblings) < n: + logging.error(f"Error: cannot find {n} full cores") + sys.exit(1) + else: + return selected_siblings + + +async def await_agent(): + start = time.time() + agent_timeout = 60 if WEKA_PERSISTENCE_MODE != "global" else 
1500 # global usually is remote storage and pre-create of logs file might take much longer + while start + agent_timeout > time.time(): + _, _, ec = await run_command("weka local ps") + if ec == 0: + logging.info("Weka-agent started successfully") + return + await asyncio.sleep(0.3) + logging.info("Waiting for weka-agent to start") + raise Exception(f"Agent did not come up in {agent_timeout} seconds") + + +processes = {} + + +class Daemon: + def __init__(self, cmd, alias): + self.cmd = cmd + self.alias = alias + self.process = None + self.task = None + + async def start(self): + logging.info(f"Starting daemon {self.alias} with cmd {self.cmd}") + self.task = asyncio.create_task(self.monitor()) + return self.task + + async def start_process(self): + logging.info(f"Starting process {self.cmd} for daemon {self.alias}") + self.process = await start_process(self.cmd, self.alias) + logging.info(f"Started process {self.cmd} for daemon {self.alias}") + + async def stop(self): + logging.info(f"Stopping daemon {self.alias}") + if self.task: + self.task.cancel() + try: + await self.task + except asyncio.CancelledError: + pass + await self.stop_process() + + async def stop_process(self): + logging.info(f"Stopping process for daemon {self.alias}") + if self.process: + await stop_process(self.process) + self.process = None + logging.info(f"Stopped process for daemon {self.alias}") + logging.info(f"No process found to stop") + + async def monitor(self): + async def with_pause(): + await asyncio.sleep(3) + + while True: + if self.process: + if self.is_running(): + await with_pause() + continue + else: + logging.info(f"Daemon {self.alias} is not running") + await self.stop_process() + await self.start_process() + + def is_running(self): + if self.process is None: + return False + running = self.process.returncode is None + return running + + +async def start_process(command, alias=""): + """Start a daemon process.""" + # TODO: Check if already exists, not really needed unless 
actually adding recovery flow + # TODO: Logs are basically thrown away into stdout . wrap agent logs as debug on logging level + process = await asyncio.create_subprocess_shell(command, preexec_fn=os.setpgrp) + # stdout=asyncio.subprocess.PIPE, + # stderr=asyncio.subprocess.PIPE) + logging.info(f"Daemon {alias or command} started with PID {process.pid}") + processes[alias or command] = process + logging.info(f"Daemon started with PID {process.pid} for command {command}") + return process + + +async def run_command(command, capture_stdout=True, log_execution=True, env: dict = None, log_output=True): + # TODO: Wrap stdout of commands via INFO via logging + if log_execution: + logging.info("Running command: " + command) + if capture_stdout: + pipe = asyncio.subprocess.PIPE + else: + pipe = None + process = await asyncio.create_subprocess_shell("set -e\n" + command, + stdout=pipe, + stderr=pipe, env=env) + stdout, stderr = await process.communicate() + if log_execution: + logging.info(f"Command {command} finished with code {process.returncode}") + if stdout and log_output: + logging.info(f"Command {command} stdout: {stdout.decode('utf-8')}") + if stderr and log_output: + logging.info(f"Command {command} stderr: {stderr.decode('utf-8')}") + return stdout, stderr, process.returncode + + +async def run_logrotate(): + stdout, stderr, ec = await run_command("logrotate /etc/logrotate.conf", log_execution=False) + if ec != 0: + raise Exception(f"Failed to run logrotate: {stderr}") + + +async def write_logrotate_config(): + with open("/etc/logrotate.conf", "w") as f: + f.write(dedent(""" + /var/log/syslog /var/log/errors { + size 1M + rotate 10 + missingok + notifempty + compress + delaycompress + postrotate + if [ -f /var/run/syslog-ng.pid ]; then + kill -HUP $(cat /var/run/syslog-ng.pid) + else + echo "syslog-ng.pid not found, skipping reload" >&2 + fi + endscript + } +""")) + + +async def periodic_logrotate(): + while not exiting: + await write_logrotate_config() + await 
run_logrotate() + await asyncio.sleep(60) + + +async def autodiscover_network_devices(subnet_str) -> List[str]: + """Returns comma-separated list of network devices + that belong to the given subnet. + """ + subnet = ipaddress.ip_network(subnet_str, strict=False) + cmd = f"ip -o addr" + stdout, stderr, ec = await run_command(cmd) + if ec != 0: + raise Exception(f"Failed to discover network devices: {stderr}") + lines = stdout.decode('utf-8').strip().split("\n") + devices = [] + for line in lines: + parts = line.split() + if len(parts) < 4: + continue + + device_name = parts[1] + family = parts[2] + ip_with_cidr = parts[3] + + # Only match address families relevant to the subnet version + if (subnet.version == 4 and family != "inet") or (subnet.version == 6 and family != "inet6"): + continue + + # Strip interface zone ID (e.g., fe80::1%eth0) + ip_str = ip_with_cidr.split("/")[0].split("%")[0] + + try: + ip = ipaddress.ip_address(ip_str) + if ip in subnet: + devices.append(device_name) + except ValueError: + continue # skip invalid IPs + + if not devices: + logging.error(f"No network devices found for subnet {subnet}") + else: + logging.info(f"Discovered network devices for subnet {subnet}: {devices}") + return devices + + +async def resolve_dhcp_net(device): + def subnet_mask_to_prefix_length(subnet_mask): + # Convert subnet mask to binary representation + binary_mask = ''.join([bin(int(octet) + 256)[3:] for octet in subnet_mask.split('.')]) + # Count the number of 1s in the binary representation + prefix_length = binary_mask.count('1') + return prefix_length + + def get_netdev_info(device): + # Create a socket to communicate with the network interface + s = None + try: + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + + # Get the IP address + ip_address = socket.inet_ntoa(fcntl.ioctl( + s.fileno(), + 0x8915, # SIOCGIFADDR + struct.pack('256s', bytes(device[:15], 'utf-8')) + )[20:24]) + + # Get the netmask + netmask = socket.inet_ntoa(fcntl.ioctl( + 
s.fileno(), + 0x891b, # SIOCGIFNETMASK + struct.pack('256s', bytes(device[:15], 'utf-8')) + )[20:24]) + cidr = subnet_mask_to_prefix_length(netmask) + + # Get the MAC address + info = fcntl.ioctl(s.fileno(), 0x8927, # SIOCGIFHWADDR + struct.pack('256s', bytes(device[:15], 'utf-8'))) + mac_address = ':'.join('%02x' % b for b in info[18:24]) + finally: + if s: + s.close() + + return mac_address, ip_address, cidr + + try: + mac_address, ip_address, cidr = get_netdev_info(device) + except OSError: + raise Exception(f"Failed to get network info for device {device}, no IP address found") + + return f"'{mac_address}/{ip_address}/{cidr}'" + + +async def create_container(): + full_cores = find_full_cores(NUM_CORES) + mode_part = "" + if MODE == "compute": + mode_part = "--only-compute-cores" + elif MODE == "drive": + mode_part = "--only-drives-cores" + elif MODE == "client": + mode_part = "--only-frontend-cores" + elif MODE == "s3": + mode_part = "--only-frontend-cores" + elif MODE == "nfs": + mode_part = "--only-frontend-cores" + + core_str = ",".join(map(str, full_cores)) + logging.info(f"Creating container with cores: {core_str}") + + # read join secret from if file exists /var/run/secrets/weka-operator/operator-user/password + join_secret_cmd = "" + join_secret_flag = "" + if os.path.exists("/var/run/secrets/weka-operator/operator-user/join-secret"): + join_secret_flag = "--join-secret" + if MODE == "client": + join_secret_flag = "--join-token" + join_secret_cmd = "$(cat /var/run/secrets/weka-operator/operator-user/join-secret)" + + global NETWORK_DEVICE + if not NETWORK_DEVICE and SUBNETS: + subnets = SUBNETS.split(",") + devices = [] + for subnet in subnets: + d = await autodiscover_network_devices(subnet) + devices.extend(d) + + NETWORK_DEVICE = ",".join(devices) + + if "aws_" in NETWORK_DEVICE: + devices = [dev.replace("aws_", "") for dev in NETWORK_DEVICE.split(",")] + net_str = " ".join([f"--net {d}" for d in devices]) + " --management-ips " + 
",".join(MANAGEMENT_IPS) + elif ',' in NETWORK_DEVICE: + net_str = " ".join([f"--net {d}" for d in NETWORK_DEVICE.split(",")]) + else: + if not NETWORK_DEVICE: + raise Exception("NETWORK_DEVICE not set") + + if is_udp(): + net_str = f"--net udp" + else: + net_str = f"--net {NETWORK_DEVICE}" + + failure_domain = FAILURE_DOMAIN + + # NOTE: client containers are set up in restricted mode by default + # (even if you login as administrator from a restricted client, your permissions will be limited to RegularUser level⁠⁠) + command = dedent(f""" + weka local setup container --name {NAME} --no-start --disable\ + --core-ids {core_str} --cores {NUM_CORES} {mode_part} \ + {net_str} --base-port {PORT} \ + {f"{join_secret_flag} {join_secret_cmd}" if join_secret_cmd else ""} \ + {f"--join-ips {JOIN_IPS}" if JOIN_IPS else ""} \ + {f"--client" if MODE == 'client' else ""} \ + {f"--restricted" if MODE == 'client' and "4.2.7.64" not in IMAGE_NAME else ""} \ + {f"--failure-domain {failure_domain}" if failure_domain else ""} + """) + logging.info(f"Creating container with command: {command}") + stdout, stderr, ec = await run_command(command) + if ec != 0: + raise Exception(f"Failed to create container: {stderr}") + logging.info("Container created successfully") + + +async def configure_traces(): + # { + # "enabled": true, + # "ensure_free_space_bytes": 3221225472, + # "freeze_period": { + # "end_time": "0001-01-01T00:00:00+00:00", + # "retention": 0, + # "start_time": "0001-01-01T00:00:00+00:00" + # }, + # "retention_type": "DEFAULT", + # "version": 1 + # } + global DUMPER_CONFIG_MODE + data = dict(enabled=True, ensure_free_space_bytes=int(ENSURE_FREE_SPACE_GB) * 1024 * 1024 * 1024, + retention_bytes=int(MAX_TRACE_CAPACITY_GB) * 1024 * 1024 * 1024, retention_type="BYTES", version=1, + freeze_period=dict(start_time="0001-01-01T00:00:00+00:00", end_time="0001-01-01T00:00:00+00:00", + retention=0)) + if MODE == 'dist': + data['enabled'] = False + data['retention_bytes'] = 128 * 1024 * 
1024 * 1024 + data_string = json.dumps(data) + + if DUMPER_CONFIG_MODE in ["auto", ""]: + DUMPER_CONFIG_MODE = "override" + + if DUMPER_CONFIG_MODE in ["override", "partial-override"]: + command = dedent(f""" + set -e + mkdir -p /opt/weka/k8s-scripts + echo '{data_string}' > /opt/weka/k8s-scripts/dumper_config.json.override + weka local run --container {NAME} mv /opt/weka/k8s-scripts/dumper_config.json.override /data/reserved_space/dumper_config.json.{DUMPER_CONFIG_MODE} + """) + elif DUMPER_CONFIG_MODE == "cluster": + command = f""" + weka local run --container {NAME} rm -f /data/reserved_space/dumper_config.json.override + """ + else: + raise Exception(f"Invalid DUMPER_CONFIG_MODE: {DUMPER_CONFIG_MODE}") + + if command: + stdout, stderr, ec = await run_command(command) + if ec != 0: + raise Exception(f"Failed to configure traces: {stderr}") + logging.info("Traces configured successfully") + + +async def ensure_aws_nics(num: int): + command = dedent(f""" + set -e + mkdir -p /opt/weka/k8s-scripts + weka local run --container {NAME} /weka/go-helpers/cloud-helper ensure-nics -n {num} + """) + stdout, stderr, ec = await run_command(command) + if ec != 0: + raise Exception(f"Failed to ensure NICs: {stderr}") + logging.info("Ensured NICs successfully") + write_results(dict(err=None, ensured=True, nics=json.loads(stdout.decode('utf-8').strip())['metadata']['vnics'][1:])) + + +async def get_containers(): + current_containers, stderr, ec = await run_command("weka local ps --json") + if ec != 0: + raise Exception(f"Failed to list containers: {stderr}") + current_containers = json.loads(current_containers) + return current_containers + + +async def get_weka_local_resources() -> dict: + resources, stderr, ec = await run_command(f"weka local resources --container {NAME} --json", log_output=False) + if ec != 0: + raise Exception(f"Failed to get resources: {stderr}") + return json.loads(resources) + + +def should_recreate_client_container(resources: dict) -> bool: + if 
def convert_to_bytes(memory: str) -> int:
    """Parse a human-readable size string (e.g. "16GiB", "500MB", "100B") into bytes.

    Decimal units (KB..EB) are powers of 10; binary units (KiB..EiB) are powers
    of 2. Raises ValueError for anything unparseable.
    """
    size_str = memory.strip().upper()
    # Fixed: anchored at the end and accepting a bare "B", so "100B" parses
    # (the 'B' entry in the table below was unreachable before) and trailing
    # garbage like "1GBx" is rejected instead of silently ignored.
    match = re.match(r"(\d+)\s*([KMGTPE]I?B|B)$", size_str)
    if not match:
        raise ValueError(f"Invalid size format: {size_str}")

    size = int(match.group(1))
    unit = match.group(2)

    multipliers = {
        'B': 1,
        'KB': 10 ** 3,
        'MB': 10 ** 6,
        'GB': 10 ** 9,
        'TB': 10 ** 12,
        'PB': 10 ** 15,
        'EB': 10 ** 18,
        'KIB': 2 ** 10,
        'MIB': 2 ** 20,
        'GIB': 2 ** 30,
        'TIB': 2 ** 40,
        'PIB': 2 ** 50,
        'EIB': 2 ** 60,
    }
    return size * multipliers[unit]


async def ensure_weka_container():
    """Create the weka container if absent, then (re)apply its resource config.

    Writes the adjusted resources JSON into /opt/weka/k8s-runtime and symlinks
    it into the container's data dir; net-device changes go through the CLI.
    """
    current_containers = await get_containers()

    if len(current_containers) == 0:
        logging.info("no pre-existing containers, creating")
        # create container
        if MODE in ["compute", "drive", "client", "s3", "nfs"]:
            await create_container()
        else:
            raise NotImplementedError(f"Unsupported mode: {MODE}")

    full_cores = find_full_cores(NUM_CORES)

    # reconfigure containers
    logging.info("Container already exists, reconfiguring")
    resources = await get_weka_local_resources()

    if MODE == "client" and should_recreate_client_container(resources):
        logging.info("Recreating client container")
        await run_command("weka local stop --force", capture_stdout=False)
        await run_command("weka local rm --all --force", capture_stdout=False)
        await create_container()
        resources = await get_weka_local_resources()

    # TODO: Normalize to have common logic between setup and reconfigure, including between clients and backends
    if MODE == "client" and len(resources['nodes']) != (NUM_CORES + 1):
        stdout, stderr, ec = await run_command(
            f"weka local resources cores -C {NAME} --only-frontend-cores {NUM_CORES} --core-ids {','.join(map(str, full_cores[:NUM_CORES]))}")
        if ec != 0:
            raise Exception(f"Failed to get frontend cores: {stderr}")

    # TODO: unite with above block as single getter
    resources = await get_weka_local_resources()

    if MODE in ["s3", "nfs"]:
        resources['allow_protocols'] = True
    resources['reserve_1g_hugepages'] = False
    resources['excluded_drivers'] = ["igb_uio"]
    resources['memory'] = convert_to_bytes(MEMORY)
    resources['auto_discovery_enabled'] = False
    resources["ips"] = MANAGEMENT_IPS

    # resources["mask_interrupts"] = True

    resources['auto_remove_timeout'] = AUTO_REMOVE_TIMEOUT

    # assumes nodes dict iteration order matches desired core assignment — TODO confirm
    cores_cursor = 0
    for node_id, node in resources['nodes'].items():
        if "MANAGEMENT" in node['roles']:
            continue
        if CPU_POLICY == "shared":
            node['dedicate_core'] = False
            node['dedicated_mode'] = "NONE"
        node['core_id'] = full_cores[cores_cursor]
        cores_cursor += 1

    # fix/add gateway
    if NET_GATEWAY:
        if not is_udp():
            # TODO: Multi-nic support with custom gateways
            # figure out what is meant here ^
            if len(resources['net_devices']) != 1:
                logging.error("Gateway configuration is not supported with multiple or zero NICs")
            # Fixed: guard against zero NICs — previously this raised IndexError
            # right after logging the error above.
            if resources['net_devices']:
                resources['net_devices'][0]['gateway'] = NET_GATEWAY

    # save resources
    resources_dir = "/opt/weka/k8s-runtime/resources"
    os.makedirs(resources_dir, exist_ok=True)
    resource_gen = str(uuid.uuid4())
    resource_file = f"{resources_dir}/weka-resources.{resource_gen}.json"
    with open(resource_file, "w") as f:
        json.dump(resources, f)
    # reconfigure containers
    stdout, stderr, ec = await run_command(f"""
    ln -sf {resource_file} /opt/weka/data/{NAME}/container/resources.json
    ln -sf {resource_file} /opt/weka/data/{NAME}/container/resources.json.stable
    ln -sf {resource_file} /opt/weka/data/{NAME}/container/resources.json.staging
    ln -sf {resource_file} /opt/weka/data/{NAME}/weka-resources.{resource_gen}.json
    # at some point weka creates such, basically expecting relative path: 'resources.json.stable -> weka-resources.35fda56d-2ce3-4f98-b77c-a399df0940af.json'
    # stable flow might not even be used, and should be fixed on wekapp side
    """)

    # cli-based changes
    cli_changes = False
    if 'aws_' not in NETWORK_DEVICE and not is_udp():
        target_devices = set(NETWORK_DEVICE.split(","))
        if SUBNETS:
            target_devices = set(await get_devices_by_subnets(SUBNETS))
        current_devices = set(dev['device'] for dev in resources['net_devices'])
        to_remove = current_devices - target_devices
        to_add = target_devices - current_devices
        for device in to_remove:
            stdout, stderr, ec = await run_command(f"weka local resources net -C {NAME} remove {device}")
            if ec != 0:
                raise Exception(f"Failed to remove net device {device}: {stderr}")
        for device in to_add:
            stdout, stderr, ec = await run_command(f"weka local resources net -C {NAME} add {device}")
            if ec != 0:
                raise Exception(f"Failed to add net device {device}: {stderr}")
        cli_changes = cli_changes or len(target_devices.difference(current_devices))

    # applying cli-based changes
    if cli_changes:
        stdout, stderr, ec = await run_command(f"""
        ln -sf `readlink /opt/weka/data/{NAME}/container/resources.json.staging` /opt/weka/data/{NAME}/container/resources.json.stable
        ln -sf `readlink /opt/weka/data/{NAME}/container/resources.json.staging` /opt/weka/data/{NAME}/container/resources.json
        """)

    if ec != 0:
        raise Exception(f"Failed to import resources: {stderr} \n {stdout}")


def get_boot_id():
    """Return the kernel's per-boot unique identifier."""
    with open("/proc/sys/kernel/random/boot_id", "r") as file:
        boot_id = file.read().strip()
    return boot_id


def get_instructions_dir():
    """Directory where the controller drops per-pod, per-boot instructions."""
    return f"/host-binds/shared/instructions/{POD_ID}/{get_boot_id()}"


@dataclass
class ShutdownInstructions:
    # Controller-granted permissions for stopping this container.
    allow_force_stop: bool = False
    allow_stop: bool = False
async def get_shutdown_instructions() -> ShutdownInstructions:
    """Load per-boot shutdown instructions for this pod, plus local overrides."""
    if not POD_ID:  ## back compat mode for when pod was scheduled without downward api
        return ShutdownInstructions()

    instructions_file = os.path.join(get_instructions_dir(), "shutdown_instructions.json")
    if os.path.exists(instructions_file):
        with open(instructions_file, "r") as fh:
            instructions = ShutdownInstructions(**json.load(fh))
    else:
        instructions = ShutdownInstructions()

    # Node-local marker files act as manual escape hatches on top of the
    # controller-provided instructions.
    if exists("/tmp/.allow-force-stop"):
        instructions.allow_force_stop = True
    if exists("/tmp/.allow-stop"):
        instructions.allow_stop = True
    return instructions


async def start_weka_container():
    """Start the local weka container, raising on a non-zero exit code."""
    out, err, rc = await run_command("weka local start")
    if rc != 0:
        raise Exception(f"Failed to start container: {err}")
    logging.info("finished applying new config")
    logging.info(f"Container reconfigured successfully: {out.decode('utf-8')}")
async def configure_persistency():
    """Bind-mount persistent host directories under /opt/weka.

    No-op unless /host-binds/opt-weka exists. Layers the image's dist payload
    over the persistent volume, wires up per-boot cleanup, shared sockets,
    the wekahome CA cert and shared envoy configs, then drops the
    PERSISTENCY_CONFIGURED marker file.
    """
    if not os.path.exists("/host-binds/opt-weka"):
        return

    script = dedent(f"""
    mkdir -p /opt/weka-preinstalled
    # --- save weka image data separately
    mount -o bind /opt/weka /opt/weka-preinstalled
    # --- WEKA_PERSISTENCE_DIR - is HostPath (persistent volume)
    # --- put existing drivers from persistent dir to weka-preinstalled
    mkdir -p {WEKA_PERSISTENCE_DIR}/dist/drivers
    mount -o bind {WEKA_PERSISTENCE_DIR}/dist/drivers /opt/weka-preinstalled/dist/drivers
    mount -o bind {WEKA_PERSISTENCE_DIR} /opt/weka
    mkdir -p /opt/weka/dist
    # --- put weka dist back on top
    mount -o bind /opt/weka-preinstalled/dist /opt/weka/dist
    # --- make drivers dir persistent
    mount -o bind {WEKA_PERSISTENCE_DIR}/dist/drivers /opt/weka/dist/drivers

    if [ -d /host-binds/boot-level ]; then
        BOOT_DIR=/host-binds/boot-level/$(cat /proc/sys/kernel/random/boot_id)/cleanup
        mkdir -p $BOOT_DIR
        mkdir -p /opt/weka/external-mounts/cleanup
        mount -o bind $BOOT_DIR /opt/weka/external-mounts/cleanup
    fi

    if [ -d /host-binds/shared ]; then
        mkdir -p /host-binds/shared/local-sockets
        mkdir -p /opt/weka/external-mounts/local-sockets
        mount -o bind /host-binds/shared/local-sockets /opt/weka/external-mounts/local-sockets
    fi

    if [ -f /var/run/secrets/weka-operator/wekahome-cacert/cert.pem ]; then
        rm -rf /opt/weka/k8s-runtime/vars/wh-cacert
        mkdir -p /opt/weka/k8s-runtime/vars/wh-cacert/
        cp /var/run/secrets/weka-operator/wekahome-cacert/cert.pem /opt/weka/k8s-runtime/vars/wh-cacert/cert.pem
        chmod 400 /opt/weka/k8s-runtime/vars/wh-cacert/cert.pem
    fi

    if [ -d /host-binds/shared-configs ]; then
        ENVOY_DIR=/opt/weka/envoy
        EXT_ENVOY_DIR=/host-binds/shared-configs/envoy
        mkdir -p $ENVOY_DIR
        mkdir -p $EXT_ENVOY_DIR
        mount -o bind $EXT_ENVOY_DIR $ENVOY_DIR
    fi

    mkdir -p {WEKA_K8S_RUNTIME_DIR}
    touch {PERSISTENCY_CONFIGURED}
    """)

    out, err, rc = await run_command(script)
    if rc != 0:
        raise Exception(f"Failed to configure persistency: {out} {err}")

    logging.info("Persistency configured successfully")


async def ensure_weka_version():
    """Pin the active weka version if none is currently selected."""
    cmd = "weka version | grep '*' || weka version set $(weka version)"
    out, err, rc = await run_command(cmd)
    if rc != 0:
        raise Exception(f"Failed to set weka version: {err}")
    logging.info("Weka version set successfully")
async def configure_agent(agent_handle_drivers=False):
    """Rewrite /etc/wekaio/service.conf to match this pod's MODE.

    agent_handle_drivers: when True the agent handles kernel driver
    installation itself; otherwise driver handling is disabled via
    skip_driver_install / ignore_driver_spec.
    """
    logging.info(f"reconfiguring agent with handle_drivers={agent_handle_drivers}")
    ignore_driver_flag = "false" if agent_handle_drivers else "true"

    env_vars = {}

    skip_envoy_setup = ""
    if MODE == "s3":
        skip_envoy_setup = "sed -i 's/skip_envoy_setup=.*/skip_envoy_setup=true/g' /etc/wekaio/service.conf || true"

    if MODE == "envoy":
        env_vars['RESTART_EPOCH_WANTED'] = str(int(os.environ.get("envoy_restart_epoch", time.time())))
        # NOTE(review): PORT may not be a str here — confirm run_command accepts it as an env value
        env_vars['BASE_ID'] = PORT

    expand_condition_mounts = ""
    if MODE in ['envoy', 's3']:
        expand_condition_mounts = ",envoy-data"

    drivers_handling_cmd = f"""
    # Check if the last line contains the pattern
    CONFFILE="/etc/wekaio/service.conf"
    PATTERN="skip_driver_install"
    if tail -n 1 "$CONFFILE" | grep -q "$PATTERN"; then
        sed -i '$d' "$CONFFILE"
    fi

    #TODO: once moving to 4.3+ only switch to ignore_driver_spec. Problem that 4.2 had it in different category
    # and check by skip_driver_install is sort of abuse of not anymore existing flag to have something to validate by
    if ! grep -q "skip_driver_install" /etc/wekaio/service.conf; then
        sed -i "/\\[os\\]/a skip_driver_install={ignore_driver_flag}" /etc/wekaio/service.conf
        sed -i "/\\[os\\]/a ignore_driver_spec={ignore_driver_flag}" /etc/wekaio/service.conf
    else
        sed -i "s/skip_driver_install=.*/skip_driver_install={ignore_driver_flag}/g" /etc/wekaio/service.conf
    fi
    sed -i "s/ignore_driver_spec=.*/ignore_driver_spec={ignore_driver_flag}/g" /etc/wekaio/service.conf || true

    sed -i "s@external_mounts=.*@external_mounts=/opt/weka/external-mounts@g" /etc/wekaio/service.conf || true
    sed -i "s@conditional_mounts_ids=.*@conditional_mounts_ids=etc-hosts,etc-resolv{expand_condition_mounts}@g" /etc/wekaio/service.conf || true
    {skip_envoy_setup}
    """

    cmd = dedent(f"""
    {drivers_handling_cmd}
    sed -i 's/cgroups_mode=auto/cgroups_mode=none/g' /etc/wekaio/service.conf || true
    sed -i 's/override_core_pattern=true/override_core_pattern=false/g' /etc/wekaio/service.conf || true
    sed -i "s/port=14100/port={AGENT_PORT}/g" /etc/wekaio/service.conf || true
    # sed -i "s/serve_static=false/serve_static=true/g" /etc/wekaio/service.conf || true
    echo '{{"agent": {{"port": \'{AGENT_PORT}\'}}}}' > /etc/wekaio/service.json
    """)
    out, err, rc = await run_command(cmd, env=env_vars)
    if rc != 0:
        raise Exception(f"Failed to configure agent: {err}")

    if MACHINE_IDENTIFIER is not None:
        logging.info(f"Setting machine-id {MACHINE_IDENTIFIER}")
        os.makedirs("/opt/weka/data/agent", exist_ok=True)
        cmd = f"echo '{MACHINE_IDENTIFIER}' > /opt/weka/data/agent/machine-identifier"
        out, err, rc = await run_command(cmd)
        if rc != 0:
            raise Exception(f"Failed to set machine-id: {err}")
    logging.info("Agent configured successfully")
async def override_dependencies_flag():
    """Pre-create the dependency success marker so the dist container can start.

    Equivalent shell:
        HARDCODED=1.0.0-024f0fdaa33ec66087bc6c5631b85819
        mkdir -p /opt/weka/data/dependencies/HARDCODED/$(uname -r)/
        touch /opt/weka/data/dependencies/HARDCODED/$(uname -r)/successful

    When WEKA_DRIVERS_HANDLING is enabled a blanket "skip" marker is written
    instead of a per-version/per-kernel "successful" marker.
    """
    logging.info("overriding dependencies flag")
    dep_version = version_params.get('dependencies', DEFAULT_DEPENDENCY_VERSION)

    if WEKA_DRIVERS_HANDLING:
        cmd = dedent("""
        mkdir -p /opt/weka/data/dependencies
        touch /opt/weka/data/dependencies/skip
        """)
    else:
        cmd = dedent(f"""
        mkdir -p /opt/weka/data/dependencies/{dep_version}/$(uname -r)/
        touch /opt/weka/data/dependencies/{dep_version}/$(uname -r)/successful
        """)
    out, err, rc = await run_command(cmd)
    if rc != 0:
        raise Exception(f"Failed to override dependencies flag: {err}")
    logging.info("dependencies flag overridden successfully")


async def ensure_stem_container(name="dist"):
    """Create a disabled, stopped stem container (default name "dist") if missing."""
    logging.info("ensuring dist container")

    cmd = dedent(f"""
    if [ -d /driver-toolkit-shared ]; then
        # Mounting kernel modules from driver-toolkit-shared to dist container
        mkdir -p /lib/modules
        mkdir -p /usr/src
        mount -o bind /driver-toolkit-shared/lib/modules /lib/modules
        mount -o bind /driver-toolkit-shared/usr/src /usr/src
    fi

    weka local ps | grep {name} || weka local setup container --name {name} --net udp --base-port {PORT} --no-start --disable
    """)
    out, err, rc = await run_command(cmd)
    if rc != 0:
        raise Exception(f"Failed to create dist container: {err}")

    logging.info("dist container created successfully")
    # wait for container to become running
async def start_stem_container():
    """Start the stem/dist container via a daemonized process."""
    logging.info("starting dist container")
    # stdout, stderr, ec = await run_command(cmd)
    # if ec != 0:
    #     raise Exception(f"Failed to start dist container: {stderr}")
    # ! start_process is deprecated and this is the only place that uses it
    # TODO: Revalidate if it needed or can be simple run_command(As it should be)
    # TODO: Still broken! hangs if running "weka local start" directly via run_command. zombie process
    await start_process(
        "weka local start")  # weka local start is not returning, so we need to daemonize it, this is a hack that needs to go away
    # reason of being stuck: agent tries to authenticate using admin:admin into this stem container, for not known reason
    logging.info("stem container started")


async def ensure_container_exec():
    """Poll until `weka local exec` works inside the container (5 min limit)."""
    logging.info("ensuring container exec")
    started_at = time.time()
    while True:
        out, err, rc = await run_command(f"weka local exec --container {NAME} -- ls")
        if rc == 0:
            break
        await asyncio.sleep(1)
        if time.time() - started_at > 300:
            raise Exception(f"Failed to exec into container in 5 minutes: {err}")
    logging.info("container exec ensured")


def write_results(results):
    """Atomically publish a results dict to /weka-runtime/results.json."""
    logging.info("Writing result into /weka-runtime/results.json, results: \n%s", results)
    os.makedirs("/weka-runtime", exist_ok=True)
    # write-then-rename keeps readers from ever seeing a partial file
    with open("/weka-runtime/results.json.tmp", "w") as fh:
        json.dump(results, fh)
    os.rename("/weka-runtime/results.json.tmp", "/weka-runtime/results.json")


async def discovery():
    # TODO: We should move here everything else we need to discover per node
    # This might be a good place to discover drives as well, as long we have some selector to discover by
    host_info = get_host_info()
    payload = dict(
        is_ht=len(read_siblings_list(0)) > 1,
        kubernetes_distro=host_info.kubernetes_distro,
        os=host_info.os,
        os_build_id=host_info.os_build_id,
        schema=DISCOVERY_SCHEMA,
    )
    write_results(payload)
async def install_gsutil():
    """Install the Google Cloud SDK and authenticate with the service account."""
    logging.info("Installing gsutil")
    await run_command("curl https://sdk.cloud.google.com | bash -s -- --disable-prompts")
    os.environ["PATH"] += ":/root/google-cloud-sdk/bin"
    await run_command("gcloud auth activate-service-account --key-file=$GOOGLE_APPLICATION_CREDENTIALS")


async def cleanup_traces_and_stop_dumper():
    """Wait for supervisord to be up, then stop the trace dumper and drop shards."""
    while True:
        out, err, rc = await run_command("weka local exec supervisorctl status | grep RUNNING")
        if rc == 0:
            break
        logging.info(f"Failed to get supervisorctl status: {err}")
        await asyncio.sleep(3)

    out, err, rc = await run_command("""
    weka local exec supervisorctl stop weka-trace-dumper
    rm -f /opt/weka/traces/*.shard
    """)
    if rc != 0:
        logging.error(f"Failed to cleanup traces: {err}")


def get_agent_cmd():
    """Command line used to run the weka agent with a per-port socket name."""
    return f"exec /usr/bin/weka --agent --socket-name weka_agent_ud_socket_{AGENT_PORT}"


daemons = {

}


# k8s lifecycle/local leadership election


def cos_reboot_machine():
    """Hard-reboot the host via sysrq after flushing filesystem buffers."""
    logging.warning("Rebooting the host")
    os.sync()
    time.sleep(3)  # give some time to log the message and sync
    os.system("echo b > /hostside/proc/sysrq-trigger")


async def is_secure_boot_enabled():
    """Best-effort secure-boot detection by grepping dmesg output."""
    out, err, rc = await run_command("dmesg")
    return "Secure boot enabled" in out.decode('utf-8')
async def cos_disable_driver_signing_verification():
    """On COS, rewrite grub kernel args to disable module signing/loadpin, rebooting if needed.

    Scans the running kernel cmdline; for each enforcement flag either flips it
    to 0 or appends a =0 form after `cros_efi` when absent. Changes are applied
    by mounting the EFI system partition and sed-ing grub.cfg, then the node is
    rebooted. Raises unless WEKA_COS_ALLOW_DISABLE_DRIVER_SIGNING permits it.
    """
    logging.info("Checking if driver signing is disabled")
    esp_partition = "/dev/disk/by-partlabel/EFI-SYSTEM"
    mount_path = "/tmp/esp"
    grub_cfg = "efi/boot/grub.cfg"
    sed_cmds = []
    reboot_required = False

    with open("/hostside/proc/cmdline", 'r') as file:
        for line in file.readlines():
            logging.info(f"cmdline: {line}")
            if "module.sig_enforce" in line:
                if "module.sig_enforce=1" in line:
                    sed_cmds.append(('module.sig_enforce=1', 'module.sig_enforce=0'))
            else:
                sed_cmds.append(('cros_efi', 'cros_efi module.sig_enforce=0'))
            if "loadpin.enabled" in line:
                if "loadpin.enabled=1" in line:
                    sed_cmds.append(('loadpin.enabled=1', 'loadpin.enabled=0'))
            else:
                sed_cmds.append(('cros_efi', 'cros_efi loadpin.enabled=0'))
            if "loadpin.enforce" in line:
                if "loadpin.enforce=1" in line:
                    sed_cmds.append(('loadpin.enforce=1', 'loadpin.enforce=0'))
            else:
                sed_cmds.append(('cros_efi', 'cros_efi loadpin.enforce=0'))

    if sed_cmds:
        logging.warning("Must modify kernel parameters")
        if WEKA_COS_ALLOW_DISABLE_DRIVER_SIGNING:
            logging.warning("Node driver signing configuration has changed, NODE WILL REBOOT NOW!")
        else:
            raise Exception(
                "Node driver signing configuration must be changed, but WEKA_COS_ALLOW_DISABLE_DRIVER_SIGNING is not set to True. Exiting.")

        await run_command(f"mkdir -p {mount_path}")
        await run_command(f"mount {esp_partition} {mount_path}")
        # Fixed: os.curdir is the literal string "." — restoring cwd with it was a
        # no-op, leaving cwd inside mount_path and making the umount below fail
        # with EBUSY. os.getcwd() captures the real working directory.
        current_path = os.getcwd()
        try:
            os.chdir(mount_path)
            for sed_cmd in sed_cmds:
                await run_command(f"sed -i 's/{sed_cmd[0]}/{sed_cmd[1]}/g' {grub_cfg}")
            reboot_required = True
        except Exception as e:
            logging.error(f"Failed to modify kernel cmdline: {e}")
            raise
        finally:
            os.chdir(current_path)
            await run_command(f"umount {mount_path}")
    if reboot_required:
        cos_reboot_machine()
    else:
        logging.info("Driver signing is already disabled")
async def cos_configure_hugepages():
    """On COS, ensure boot-time hugepage kernel args match the desired size/count.

    Skips entirely when hugepages are already present in /proc/meminfo.
    Otherwise edits grub.cfg on the EFI system partition and reboots, gated by
    WEKA_COS_ALLOW_HUGEPAGE_CONFIG.
    """
    if not is_google_cos():
        logging.debug("Skipping hugepages configuration")
        return

    with open("/proc/meminfo", 'r') as meminfo:
        for line in meminfo.readlines():
            if "HugePages_Total" in line:
                hugepage_count = int(line.split()[1])
                if hugepage_count > 0:
                    logging.info(f"Node already has {hugepage_count} hugepages configured, skipping")
                    return

    logging.info("Checking if hugepages are set")
    esp_partition = "/dev/disk/by-partlabel/EFI-SYSTEM"
    mount_path = "/tmp/esp"
    grub_cfg = "efi/boot/grub.cfg"
    sed_cmds = []
    reboot_required = False

    # Fixed: os.curdir is just ".", so restoring cwd with it was a no-op and the
    # later umount ran with cwd still inside the mountpoint. os.getcwd() is correct.
    current_path = os.getcwd()
    with open("/hostside/proc/cmdline", 'r') as file:
        for line in file.readlines():
            logging.info(f"cmdline: {line}")
            if "hugepagesz=" in line:
                if "hugepagesz=1g" in line.lower() and WEKA_COS_GLOBAL_HUGEPAGE_SIZE == "2m":
                    sed_cmds.append(('hugepagesz=1g', 'hugepagesz=2m'))
                elif "hugepagesz=2m" in line.lower() and WEKA_COS_GLOBAL_HUGEPAGE_SIZE == "1g":
                    sed_cmds.append(('hugepagesz=2m', 'hugepagesz=1g'))
            if "hugepages=" not in line:
                # hugepages= is not set at all
                sed_cmds.append(('cros_efi', f'cros_efi hugepages={WEKA_COS_GLOBAL_HUGEPAGE_COUNT}'))
            elif f"hugepages={WEKA_COS_GLOBAL_HUGEPAGE_COUNT}" not in line and WEKA_COS_ALLOW_HUGEPAGE_CONFIG:
                # hugepages= is set but not to the desired value, and we are allowed to change it
                # Fixed: sed runs in BRE mode where `+` is a literal, so the old
                # pattern `hugepages=[0-9]+` never matched; `[0-9][0-9]*` is BRE-safe.
                sed_cmds.append(('hugepages=[0-9][0-9]*', f'hugepages={WEKA_COS_GLOBAL_HUGEPAGE_COUNT}'))
            elif f"hugepages={WEKA_COS_GLOBAL_HUGEPAGE_COUNT}" not in line and not WEKA_COS_ALLOW_HUGEPAGE_CONFIG:
                logging.info(f"Node hugepages configuration is managed externally, skipping")

    if sed_cmds:
        logging.warning("Must modify kernel HUGEPAGES parameters")
        if WEKA_COS_ALLOW_HUGEPAGE_CONFIG:
            logging.warning("Node hugepage configuration has changed, NODE WILL REBOOT NOW!")
        else:
            raise Exception(
                "Node hugepage configuration must be changed, but WEKA_COS_ALLOW_HUGEPAGE_CONFIG is not set to True. Exiting.")

        await run_command(f"mkdir -p {mount_path}")
        await run_command(f"mount {esp_partition} {mount_path}")
        try:
            os.chdir(mount_path)
            for sed_cmd in sed_cmds:
                await run_command(f"sed -i 's/{sed_cmd[0]}/{sed_cmd[1]}/g' {grub_cfg}")
            reboot_required = True
        except Exception as e:
            logging.error(f"Failed to modify kernel cmdline: {e}")
            raise
        finally:
            os.chdir(current_path)
            os.sync()
            await run_command(f"umount {mount_path}")
    if reboot_required:
        cos_reboot_machine()
    else:
        logging.info(f"Hugepages are already configured to {WEKA_COS_GLOBAL_HUGEPAGE_COUNT}x2m pages")


async def disable_driver_signing():
    """Entry point: disable driver signing, only relevant on Google COS."""
    if not is_google_cos():
        return
    logging.info("Ensuring driver signing is disabled")
    await cos_disable_driver_signing_verification()


SOCKET_NAME = '\0weka_runtime_' + NAME  # Abstract namespace socket
WEKA_K8S_RUNTIME_DIR = '/opt/weka/k8s-runtime'
GENERATION_PATH = f'{WEKA_K8S_RUNTIME_DIR}/runtime-generation'
CURRENT_GENERATION = str(time.time())
PERSISTENCY_CONFIGURED = f'{WEKA_K8S_RUNTIME_DIR}/persistency-configured'


def is_udp():
    """True when networking runs in UDP mode (explicit device or UDP_MODE flag)."""
    return NETWORK_DEVICE.lower() == "udp" or UDP_MODE


async def write_generation():
    """Publish this process's generation stamp (used to fence older processes)."""
    while os.path.exists("/host-binds/opt-weka") and not os.path.exists(PERSISTENCY_CONFIGURED):
        logging.info("Waiting for persistency to be configured")
        await asyncio.sleep(1)

    logging.info("Writing generation %s", CURRENT_GENERATION)
    os.makedirs(WEKA_K8S_RUNTIME_DIR, exist_ok=True)
    with open(GENERATION_PATH, 'w') as f:
        f.write(CURRENT_GENERATION)
    logging.info("current generation: %s", read_generation())


def read_generation():
    """Read the last published generation stamp; empty string when unreadable."""
    try:
        with open(GENERATION_PATH, 'r') as f:
            ret = f.read().strip()
    except Exception as e:
        logging.debug("Failed to read generation: %s", e)
        ret = ""
    return ret


async def obtain_lock():
    """Bind the abstract-namespace socket acting as a node-local exclusivity lock."""
    server = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    server.setblocking(False)
    server.bind(SOCKET_NAME)
    return server


_server = None
async def ensure_envoy_container():
    """Create the envoy container via `weka local setup envoy` if it is missing."""
    logging.info("ensuring envoy container")
    cmd = dedent(f"""
    weka local ps | grep envoy || weka local setup envoy
    """)
    _, _, rc = await run_command(cmd)
    if rc != 0:
        raise Exception(f"Failed to ensure envoy container")


def write_file(path, content):
    """Write content to path, creating parent directories as needed."""
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as fh:
        fh.write(content)


async def is_port_free(port: int) -> bool:
    """True when a TCP bind to localhost:port succeeds."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
        try:
            probe.bind(('localhost', port))
        except OSError as e:
            if e.errno == 98:  # Address already in use
                logging.debug(f"Port {port} is already in use")
            else:
                logging.error(f"Failed to bind to port {port}: {e}")
            return False
        return True


async def get_free_subrange_in_port_range(
        base_port: int,
        max_port: int,
        subrange_size: int,
        exclude_ports: Optional[List[int]] = None
) -> Tuple[int, int]:
    """Get a subrange of free ports of size subrange_size in the specified port range."""
    exclude_ports = sorted(exclude_ports or [])
    # Cache probe results so each port is checked at most once across candidates.
    free_ports = set()
    not_free_ports = set()

    candidate = base_port
    while candidate <= max_port - subrange_size:
        # Skip any subranges that intersect with exclude_ports
        for excluded in exclude_ports:
            if candidate <= excluded < candidate + subrange_size:
                candidate = excluded + 1
                break
        else:
            first = candidate
            run_length = 0

            for probe_port in range(candidate, candidate + subrange_size):
                if probe_port in free_ports:
                    run_length += 1
                elif probe_port in not_free_ports:
                    break
                elif await is_port_free(probe_port):
                    free_ports.add(probe_port)
                    run_length += 1
                else:
                    not_free_ports.add(probe_port)
                    break

            if run_length == subrange_size:
                logging.info(f"Found free subrange: {first}-{first + subrange_size - 1}")
                return first, first + subrange_size - 1

            # If not all ports in the subrange were free, move to the next port
            candidate += 1

    raise RuntimeError(f"Could not find a subrange of {subrange_size} free ports in the specified range.")


async def get_free_port(base_port: int, max_port: int, exclude_ports: Optional[List[int]] = None) -> int:
    """Return the first free port in [base_port, max_port), honoring exclusions."""
    for candidate in range(base_port, max_port):
        if exclude_ports and candidate in exclude_ports:
            continue
        if await is_port_free(candidate):
            logging.info(f"Found free port: {candidate}")
            return candidate

    raise RuntimeError(f"Failed to find free port in range {base_port}-{max_port}")


async def ensure_client_ports():
    """Pick PORT / AGENT_PORT for client mode (unless already provided) and persist them."""
    global PORT, AGENT_PORT
    logging.info("Ensuring client ports")

    if parse_port(PORT) > 0 and parse_port(AGENT_PORT) > 0:  # we got resources via env, so no need to wait here
        await save_weka_ports_data()
        return

    base_port = parse_port(BASE_PORT)
    port_range = parse_port(PORT_RANGE)
    assert base_port > 0, "BASE_PORT is not set"
    max_port = base_port + port_range if port_range > 0 else MAX_PORT

    try:
        if not parse_port(AGENT_PORT):
            chosen = await get_free_port(base_port, max_port)
            AGENT_PORT = f'{chosen}'
        if not parse_port(PORT):
            range_start, _ = await get_free_subrange_in_port_range(base_port, max_port, WEKA_CONTAINER_PORT_SUBRANGE,
                                                                   exclude_ports=[int(AGENT_PORT)])
            PORT = f'{range_start}'
    except RuntimeError as e:
        raise Exception(f"Failed to find free ports: {e}")
    else:
        await save_weka_ports_data()


async def save_weka_ports_data():
    """Persist the chosen ports under /opt/weka/k8s-runtime/vars."""
    write_file("/opt/weka/k8s-runtime/vars/port", str(PORT))
    write_file("/opt/weka/k8s-runtime/vars/agent_port", str(AGENT_PORT))
    logging.info(f"PORT={PORT}, AGENT_PORT={AGENT_PORT}")


def parse_port(port_str: str) -> int:
    """Parse a port string; 0 when empty/unparseable."""
    try:
        return int(port_str)
    except ValueError:
        return 0


async def get_requested_drives():
    """Drive serials requested via the controller-provided resources.json, if any."""
    if not os.path.exists("/opt/weka/k8s-runtime/resources.json"):
        return []
    with open("/opt/weka/k8s-runtime/resources.json", "r") as fh:
        data = json.load(fh)
    return data.get("drives", [])
async def wait_for_resources():
    """Block until the controller publishes resources.json, then apply it to globals.

    Updates PORT/AGENT_PORT/RESOURCES/FAILURE_DOMAIN/NETWORK_DEVICE (and
    MACHINE_IDENTIFIER when overridden). Client mode only allocates ports.
    """
    global PORT, AGENT_PORT, RESOURCES, FAILURE_DOMAIN, NETWORK_DEVICE

    if MODE == 'client':
        await ensure_client_ports()

    if MODE not in ['drive', 's3', 'compute', 'nfs', 'envoy', 'client']:
        return

    logging.info("waiting for controller to set resources")

    while not os.path.exists("/opt/weka/k8s-runtime/resources.json"):
        logging.info("waiting for /opt/weka/k8s-runtime/resources.json")
        await asyncio.sleep(3)
        # Bail out if the controller asked us to stop while we were waiting.
        if (await get_shutdown_instructions()).allow_stop:
            raise Exception("Shutdown requested")

    with open("/opt/weka/k8s-runtime/resources.json", "r") as fh:
        data = json.load(fh)

    logging.info("found resources.json: %s", data)
    net_devices = ",".join(data.get("netDevices", []))
    if net_devices and "aws_" in net_devices:
        NETWORK_DEVICE = net_devices

    if data.get("machineIdentifier"):
        logging.info("found machineIdentifier override, applying")
        global MACHINE_IDENTIFIER
        MACHINE_IDENTIFIER = data.get("machineIdentifier")
    if MODE == "client":
        return

    RESOURCES = data
    if "failureDomain" in data:
        FAILURE_DOMAIN = data["failureDomain"]
        logging.info("Failure Domain: %s", FAILURE_DOMAIN)
    if parse_port(PORT) == 0 and MODE != 'envoy':
        PORT = data["wekaPort"]
    if parse_port(AGENT_PORT) == 0:
        AGENT_PORT = data["agentPort"]

    await save_weka_ports_data()
async def get_single_device_ip(device_name: str = "default") -> str:
    """Resolve one usable IP for a NIC ("default" = the default-route device).

    For IPv6 only global (non-link-local) addresses are considered; raises
    when no address can be discovered.
    """
    if device_name == "default":
        if IS_IPV6:
            cmd = "ip -6 addr show $(ip -6 route show default | awk '{print $5}' | head -n1) | grep 'inet6 ' | grep global | awk '{print $2}' | cut -d/ -f1"
        else:
            cmd = "ip route show default | grep src | awk '/default/ {print $9}' | head -n1"
    elif IS_IPV6:
        # use ULA/GUA address for ipv6 (WEKA does not support link-local addresses)
        cmd = f"ip -6 addr show dev {device_name} | grep -E 'inet6 (fd|2)' | head -n1 | awk '{{print $2}}' | cut -d/ -f1"
    else:
        cmd = f"ip addr show dev {device_name} | grep 'inet ' | awk '{{print $2}}' | cut -d/ -f1"

    out, err, rc = await run_command(cmd)
    if rc != 0:
        raise Exception(f"Failed to get ip address for device {device_name}: {err}")
    ip = out.decode('utf-8').strip()

    # try again with a different command for default
    if not ip and device_name == "default":
        # TODO: support ipv6 in this case
        if not IS_IPV6:
            cmd = "ip -4 addr show dev $(ip route show default | awk '{print $5}') | grep inet | awk '{print $2}' | cut -d/ -f1"
            out, err, rc = await run_command(cmd)
            if rc != 0:
                raise Exception(f"Failed to get ip address for device {device_name}: {err}")
            ip = out.decode('utf-8').strip()

    if not ip:
        raise Exception(f"Failed to get ip address for device {device_name}")
    return ip


async def get_devices_by_subnets(subnets):
    """Expand a comma-separated subnet list into matching NIC names."""
    devices = []
    for subnet in subnets.split(","):
        devices.extend(await autodiscover_network_devices(subnet))
    logging.info("found devices by subnets(%s): %s", subnets, devices)
    return devices
async def write_management_ips():
    """Auto-discover management IPs and write them to a file"""
    if MODE not in ['drive', 'compute', 's3', 'nfs', 'client']:
        return

    ipAddresses = []

    if os.environ.get("MANAGEMENT_IP") and "aws_" in NETWORK_DEVICE:
        ipAddresses.append(os.environ.get("MANAGEMENT_IP"))
    elif not NETWORK_DEVICE and SUBNETS:
        for device in await get_devices_by_subnets(SUBNETS):
            ipAddresses.append(await get_single_device_ip(device))
    # default udp mode (if network device is not set explicitly)
    elif is_udp():
        if NETWORK_DEVICE != 'udp':
            ipAddresses.append(await get_single_device_ip(NETWORK_DEVICE))
        else:
            ipAddresses.append(await get_single_device_ip())
    # if single nic is used
    elif ',' not in NETWORK_DEVICE:
        ipAddresses.append(await get_single_device_ip(NETWORK_DEVICE))
    # if multiple nics are used
    else:
        for device in NETWORK_DEVICE.split(","):
            ipAddresses.append(await get_single_device_ip(device))

    if not ipAddresses:
        raise Exception("Failed to discover management IPs")

    # write-then-rename so readers never observe a partial file
    with open("/opt/weka/k8s-runtime/management_ips.tmp", "w") as fh:
        fh.write("\n".join(ipAddresses))
    os.rename("/opt/weka/k8s-runtime/management_ips.tmp", "/opt/weka/k8s-runtime/management_ips")

    logging.info(f"Management IPs: {ipAddresses}")
    global MANAGEMENT_IPS
    MANAGEMENT_IPS = ipAddresses


async def ensure_drives():
    """Match requested drive serials against system drives and record them."""
    sys_drives = await find_weka_drives()
    requested_drives = RESOURCES.get("drives", [])
    drives_to_setup = []
    for serial in requested_drives:
        for sd in sys_drives:
            if sd["serial_id"] == serial:
                drives_to_setup.append(sd["block_device"])
                break
        # else:
        #     raise Exception(f"Drive {drive['serial_id']} not found")

    # write discovered drives into runtime dir
    os.makedirs("/opt/weka/k8s-runtime", exist_ok=True)
    with open("/opt/weka/k8s-runtime/drives.json", "w") as fh:
        json.dump([d for d in sys_drives if d['serial_id'] in requested_drives], fh)
    logging.info(f"sys_drives: {sys_drives}")
    logging.info(f"requested_drives: {requested_drives}")
    logging.info(f"in-kernel drives are: {drives_to_setup}")


is_legacy_driver_command = None


async def is_legacy_driver_cmd() -> bool:
    """Detect (and cache) whether `weka driver pack` is unavailable (legacy dist mode)."""
    global is_legacy_driver_command
    if is_legacy_driver_command is not None:
        return is_legacy_driver_command
    out, err, rc = await run_command("weka driver --help | grep pack")
    if rc == 0:
        logging.info("Driver pack command is available, new dist mode")
        is_legacy_driver_command = False
        return False
    logging.info("Driver pack command is not available, legacy dist mode")
    is_legacy_driver_command = True
    return True


async def pack_drivers():
    """Run `weka driver pack`, raising on failure."""
    logging.info("Packing drivers")
    out, err, rc = await run_command("weka driver pack")
    if rc != 0:
        raise Exception(f"Failed to pack drivers: {err}")
    logging.info("Drivers packed successfully")
async def run_prerun_script():
    """Decode and execute the base64 PRE_RUN_SCRIPT env var, if provided."""
    pre_run_script = os.environ.get("PRE_RUN_SCRIPT")
    if not pre_run_script:
        return
    # decode base64
    pre_run_script = base64.b64decode(pre_run_script).decode('utf-8')
    logging.info(f"Running pre-run script: {pre_run_script}")
    # save script into tmp script file
    with open("/tmp/pre-run-script.sh", "w") as f:
        f.write(pre_run_script)
    # run script
    cmd = "bash /tmp/pre-run-script.sh"
    stdout, stderr, ec = await run_command(cmd, capture_stdout=False)
    if ec != 0:
        raise Exception(f"Failed to run pre-run script: {stderr}")


async def umount_drivers():
    """Unmount all host wekafs mounts and, if all succeeded, remove the kernel module.

    Publishes the outcome (errors + unmounted paths) via write_results.
    """
    # TODO: Should support specific container id
    logging.info("Umounting driver")
    find_mounts_cmd = "nsenter --mount --pid --target 1 -- mount -t wekafs | awk '{print $3}'"
    stdout, stderr, ec = await run_command(find_mounts_cmd)
    if ec != 0:
        logging.info(f"Failed to find weka mounts: {stderr} {stdout}")

    errs = []
    umounted_paths = []

    for mount in stdout.decode('utf-8').split("\n"):
        if not mount:
            continue
        umount_cmd = f"nsenter --mount --pid --target 1 -- umount {mount}"
        u_out, u_err, u_ec = await run_command(umount_cmd)
        if u_ec != 0:
            # Fixed: previously stderr was appended for every umount (even
            # successful ones), so errs was non-empty whenever any mount
            # existed and the rmmod step below could never run. Also decode to
            # str so the result dict stays JSON-serializable.
            errs.append(u_err.decode('utf-8', 'replace') if isinstance(u_err, bytes) else u_err)
            continue
        umounted_paths.append(mount)

    # after umounts without errors we should succeed to rmmod, be that true or not - attempting
    if len(errs) == 0:
        stdout, stderr, ec = await run_command("""
        if lsmod | grep wekafsio; then
            rmmod wekafsio
        fi
        """
                                               )
        if ec != 0:
            errs.append(stderr.decode('utf-8', 'replace') if isinstance(stderr, bytes) else stderr)

    logging.info("weka mounts umounted successfully")
    write_results(dict(
        error=errs,
        umounted_paths=umounted_paths,
    ))
mode") + await cos_configure_hugepages() + await discovery() + return + + if MODE == "drivers-loader": + # self signal to exit + await override_dependencies_flag() + # 2 minutes timeout for driver loading + end_time = time.time() + 120 + await disable_driver_signing() + loaded = False + while time.time() < end_time: + try: + await load_drivers() + write_results(dict( + err=None, + drivers_loaded=True + )) + logging.info("Drivers loaded successfully") + loaded = True + return + except Exception as e: + await asyncio.sleep(5) + if time.time() > end_time: + write_results(dict( + err=getattr(e, 'message', repr(e)), + drivers_loaded=False, + )) + # return (not raise) to avoid infinite container restarts in the pod + return + logging.info("retrying drivers download... will reach timeout in %d seconds", end_time - time.time()) + if not loaded: + raise Exception("Failed to load drivers") + return + + await configure_persistency() + await wait_for_resources() + await write_generation() # write own generation to kill other processes + await write_management_ips() + global _server + _server = await obtain_lock() # then waiting for lock with short timeout + + if MODE != "adhoc-op": # this can be specialized container that should not have agent + await configure_agent() + syslog = Daemon("/usr/sbin/syslog-ng -F -f /etc/syslog-ng/syslog-ng.conf --pidfile /var/run/syslog-ng.pid", "syslog") + await syslog.start() + + await override_dependencies_flag() + if MODE not in ["dist", "drivers-dist", "drivers-loader", "drivers-builder", "adhoc-op-with-container", "envoy", + "adhoc-op"]: + await ensure_drivers() + + if MODE != "adhoc-op": + agent_cmd = get_agent_cmd() + agent = Daemon(agent_cmd, "agent") + await agent.start() + await await_agent() + await ensure_weka_version() + + if MODE == "drivers-dist": + # Dist is only serving, we will invoke downloads on it, probably in stand-alone ad-hoc container, but never actually build + # if DIST_LEGACY_MODE: + logging.info("dist-service flow") 
+ await ensure_stem_container("dist") + await configure_traces() + await start_stem_container() + await cleanup_traces_and_stop_dumper() + return + + if MODE == "adhoc-op-with-container": + global NAME + NAME = "adhoc" + await ensure_stem_container(NAME) + await configure_traces() + await start_stem_container() + await ensure_container_exec() + instruction = json.loads(INSTRUCTIONS) + logging.info(f"adhoc-op-with-container instruction: {instruction}") + payload = json.loads(instruction['payload']) + if instruction.get('type') == 'ensure-nics': + if payload.get('type') == "aws": + await ensure_aws_nics(payload['dataNICsNumber']) + return + else: + raise ValueError(f"Ensure NICs instruction type not supported: {payload.get('type')}") + else: + raise ValueError(f"unsupported instruction: {instruction.get('type')}") + + if MODE == "adhoc-op": + instruction = json.loads(INSTRUCTIONS) + if instruction.get('type') and instruction['type'] == "discover-drives": + await discover_drives() + elif instruction.get('type') and instruction['type'] == 'force-resign-drives': + logging.info(f"force-resign-drives instruction: {instruction}") + payload = json.loads(instruction['payload']) + device_paths = payload.get('devicePaths', []) + device_serials = payload.get('deviceSerials', []) + if device_paths: + await force_resign_drives_by_paths(device_paths) + elif device_serials: + await force_resign_drives_by_serials(device_serials) + elif instruction.get('type') and instruction['type'] == 'sign-drives': + logging.info(f"sign-drives instruction: {instruction}") + payload = json.loads(instruction['payload']) + signed_drives = await sign_drives(payload) + logging.info(f"signed_drives: {signed_drives}") + await asyncio.sleep(3) # a hack to give kernel a chance to update paths, as it's not instant + await discover_drives() + elif instruction.get('type') and instruction['type'] == 'debug': + # TODO: Wrap this as conditional based on payload, as might fail in some cases + raw_disks = await 
find_disks() + logging.info(f"Raw disks: {raw_disks}") + # TODO: Should we support generic command proxy? security concern? + elif instruction.get('type') and instruction['type'] == 'umount': + logging.info(f"umounting wekafs mounts") + await umount_drivers() + else: + raise ValueError(f"Unsupported instruction: {INSTRUCTIONS}") + return + + # de-facto, both drivers-builder and dist right now are doing "build and serve" + if MODE in ["dist", "drivers-builder"]: + DIST_LEGACY_MODE = await is_legacy_driver_cmd() + logging.info("dist-service flow") + if is_google_cos(): + await install_gsutil() + await cos_build_drivers() + + elif DIST_LEGACY_MODE: # default + await agent.stop() + await configure_agent(agent_handle_drivers=True) + await agent.start() # here the build happens + await await_agent() + + await ensure_stem_container("dist") + await configure_traces() + if not DIST_LEGACY_MODE: + # there might be a better place for preRunScript, but it is needed just for driver now + await run_prerun_script() + await pack_drivers() # explicit pack of drivers if supported, which is new method, that should become default with rest of code removed eventually + else: + await agent.stop() + await configure_agent(agent_handle_drivers=False) + await agent.start() + await await_agent() + + if DIST_LEGACY_MODE: + await copy_drivers() + await start_stem_container() + await cleanup_traces_and_stop_dumper() + weka_version, _, _ = await run_command("weka version current") + write_results( + { + "driver_built": True, + "err": "", + "weka_version": weka_version.decode().strip(), + "kernel_signature": await get_kernel_signature(weka_pack_supported=not DIST_LEGACY_MODE, + weka_drivers_handling=WEKA_DRIVERS_HANDLING), + "weka_pack_not_supported": DIST_LEGACY_MODE, + "no_weka_drivers_handling": not WEKA_DRIVERS_HANDLING, + }) + return + + if MODE == "envoy": + await ensure_envoy_container() + return + + await ensure_weka_container() + await configure_traces() + await start_weka_container() + 
await ensure_container_exec() + logging.info("Container is UP and running") + if MODE == "drive": + await ensure_drives() + + +async def get_kernel_signature(weka_pack_supported=False, weka_drivers_handling=False): + if not weka_drivers_handling: + return "" + + cmd = "" + if weka_pack_supported: + cmd = "weka driver kernel 2>&1 | awk '{printf \"%s\", $NF}'" + else: + # tr -d '\0' is needed to remove null character from the end of output + cmd = "weka driver kernel-sig 2>&1 | awk '{printf \"%s\", $NF}' | tr -d '\\0'" + + stdout, stderr, ec = await run_command(cmd) + if ec != 0: + raise Exception(f"Failed to get kernel signature: {stderr}") + + res = stdout.decode().strip() + assert res, "Kernel signature not found" + return res + + +async def stop_process(process): + logging.info(f"stopping daemon with pid {process.pid} (via process group), {process}") + + async def cleanup_process(): + for k, v in list(processes.items()): + if v == process: + logging.info(f"removing process {k}") + del processes[k] + logging.info(f"waiting for process {process.pid} to exit") + await process.wait() + logging.info(f"process {process.pid} exited") + + if process.returncode is not None: + await cleanup_process() + return + + pgid = os.getpgid(process.pid) + logging.info(f"stopping process group {pgid}") + os.killpg(pgid, signal.SIGTERM) + logging.info(f"process group {pgid} stopped") + await cleanup_process() + + +def is_wrong_generation(): + if MODE in ['drivers-loader', 'discovery']: + return False + + current_generation = read_generation() + if current_generation == "": + return False + + if current_generation != CURRENT_GENERATION: + logging.error("Wrong generation detected, exiting, current:%s, read: %s", CURRENT_GENERATION, read_generation()) + return True + return False + + +async def takeover_shutdown(): + await run_command("weka local stop --force", capture_stdout=False) + + +def get_active_mounts(file_path="/proc/wekafs/interface") -> int: + """Get the number of active 
mounts from the specified file. + Return -1 if the number of active mounts cannot be determined. + """ + try: + with open(file_path, "r") as file: + for line in file: + if line.startswith("Active mounts:"): + # Extract the number after "Active mounts:" + return int(line.split(":")[1].strip()) + except FileNotFoundError: + logging.error(f"File '{file_path}' not found.") + except ValueError: + logging.error(f"Failed to parse the number of active mounts.") + except Exception as e: + logging.error(f"Failed to get the number of active mounts: {e}") + return -1 + + +async def wait_for_shutdown_instruction(): + while True: + shutdown_instructions = await get_shutdown_instructions() + + if shutdown_instructions.allow_force_stop: + logging.info("Received 'allow-force-stop' instruction") + return + if shutdown_instructions.allow_stop: + logging.info("Received 'allow-stop' instruction") + return + + logging.info("Waiting for shutdown instruction...") + await asyncio.sleep(5) + + +async def watch_for_force_shutdown(): + while True: + if (await get_shutdown_instructions()).allow_force_stop: + logging.info("Received 'allow-force-stop' instruction") + await run_command("weka local stop --force", capture_stdout=False) + return + await asyncio.sleep(5) + + +async def is_container_running(no_agent_as_not_running=False): + try: + containers = await get_containers() + except Exception as e: + if no_agent_as_not_running: + logging.exception("agent error, due to force stop - assuming container is not running") + return False + else: + logging.exception("agent error, since no force stop - assuming container is running") + return True + for container in containers: + if container['name'] == NAME: + if container['runStatus'] == "Stopped": + return False + return True + return False + + +async def shutdown(): + global exiting + while not (exiting or is_wrong_generation()): + await asyncio.sleep(1) + continue + + logging.warning("Received signal, stopping all processes") + exiting = True # 
multiple entry points of shutdown, exiting is global check for various conditions + + if MODE not in ["drivers-loader", "discovery", "ensure-nics"]: + if MODE in ["client", "s3", "nfs", "drive", "compute"]: + await wait_for_shutdown_instruction() + + force_stop = False + if (await get_shutdown_instructions()).allow_force_stop: + force_stop = True + if is_wrong_generation(): + force_stop = True + if MODE not in ["s3", "drive", "compute", "nfs"]: + force_stop = True + stop_flag = "--force" if force_stop else "-g" + + force_shutdown_task = None + if "--force" not in stop_flag: + force_shutdown_task = asyncio.create_task(watch_for_force_shutdown()) + + while await is_container_running(no_agent_as_not_running=force_stop): + await run_command(f"weka local stop {stop_flag}", capture_stdout=False) + if force_shutdown_task is not None: + force_shutdown_task.cancel() + logging.info("finished stopping weka container") + + if MODE == "drive": + timeout = 60 + # print out in-kernel devices for up to 60 seconds every 0.3 seconds + requested_drives = await get_requested_drives() + logging.info(f"Waiting for {len(requested_drives)} requested drives to return to kernel: {requested_drives}") + + for _ in range(int(timeout / 0.3)): + drives = await find_weka_drives() + logging.info(f"Found {len(drives)}: {drives}") + in_kernel_drives_serials = [d['serial_id'] for d in drives] + + requested_drives_returned = True + for requested_serial in requested_drives: + if requested_serial not in in_kernel_drives_serials: + logging.info(f"Requested drive {requested_serial} not found in kernel drives") + requested_drives_returned = False + + if requested_drives_returned: + logging.info("All requested drives returned to kernel") + break + + await asyncio.sleep(0.3) + + for key, process in dict(processes.items()).items(): + logging.info(f"stopping process {process.pid}, {key}") + await stop_process(process) + logging.info(f"process {process.pid} stopped") + + tasks = [t for t in 
asyncio.all_tasks(loop) if t is not asyncio.current_task(loop)] + [task.cancel() for task in tasks] + + logging.info("All processes stopped, stopping main loop") + loop.stop() + logging.info("Main loop stopped") + + +exiting = False + + +def signal_handler(sig): + global exiting + logging.info(f"Received signal {sig}") + exiting = True + + +def reap_zombies(): + # agent leaves zombies behind on weka local start + while True: + time.sleep(1) + try: + # Wait for any child process, do not block + pid, _ = os.waitpid(-1, os.WNOHANG) + if pid == 0: # No zombie to reap + continue + except ChildProcessError: + # No child processes + continue + + +zombie_collector = threading.Thread(target=reap_zombies, daemon=True) +zombie_collector.start() + +# Setup signal handler for graceful shutdown +loop.add_signal_handler(signal.SIGINT, partial(signal_handler, "SIGINT")) +loop.add_signal_handler(signal.SIGTERM, partial(signal_handler, "SIGTERM")) + +shutdown_task = loop.create_task(shutdown()) +takeover_shutdown_task = loop.create_task(takeover_shutdown()) + +main_loop = loop.create_task(main()) +if MODE not in ["adhoc-op"]: + logrotate_task = loop.create_task(periodic_logrotate()) + +try: + try: + loop.run_until_complete(main_loop) + loop.run_forever() + except RuntimeError: + if exiting: + logging.info("Cancelled") + else: + raise +finally: + if _server is not None: + _server.close() + debug_sleep = int(os.environ.get("WEKA_OPERATOR_DEBUG_SLEEP", 3)) + logging.info(f"{debug_sleep} seconds exit-sleep to allow for debugging and ensure proper sync") + start = time.time() + while time.time() - start < debug_sleep: + if os.path.exists("/tmp/.cancel-debug-sleep"): + break + time.sleep(1) diff --git a/weka-operator/weka-operator/templates/NOTES.txt b/weka-operator/weka-operator/templates/NOTES.txt new file mode 100644 index 00000000..4c124b85 --- /dev/null +++ b/weka-operator/weka-operator/templates/NOTES.txt @@ -0,0 +1,8 @@ +Chart: {{ .Chart.Name }} +Release: {{ .Release.Name }} +``` 
+{{- if .Values.gkeCompatibility.hugepageConfiguration.enabled}} +WARNING: Automatic setup of hugepages on GKE nodes is enabled. This will + cause a forceful reboot of all nodes in the cluster upon first installation of any Weka client! + Please ensure that this is acceptable before proceeding. +{{- end}} diff --git a/weka-operator/weka-operator/templates/_helpers.tpl b/weka-operator/weka-operator/templates/_helpers.tpl new file mode 100644 index 00000000..8165d886 --- /dev/null +++ b/weka-operator/weka-operator/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "weka-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "weka-operator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "weka-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "weka-operator.labels" -}} +helm.sh/chart: {{ include "weka-operator.chart" . }} +{{ include "weka-operator.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "weka-operator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "weka-operator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "weka-operator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "weka-operator.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/weka-operator/weka-operator/templates/auth_proxy_client_clusterrole.yaml b/weka-operator/weka-operator/templates/auth_proxy_client_clusterrole.yaml new file mode 100644 index 00000000..111e63a3 --- /dev/null +++ b/weka-operator/weka-operator/templates/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/part-of: weka-operator + app.kubernetes.io/managed-by: kustomize + name: "{{ .Values.prefix }}-metrics-reader" +rules: + - nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/weka-operator/weka-operator/templates/auth_proxy_role.yaml b/weka-operator/weka-operator/templates/auth_proxy_role.yaml new file mode 100644 index 00000000..94fa7b63 --- /dev/null +++ b/weka-operator/weka-operator/templates/auth_proxy_role.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/name: 
clusterrole + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-proxy-role" +rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/weka-operator/weka-operator/templates/auth_proxy_role_binding.yaml b/weka-operator/weka-operator/templates/auth_proxy_role_binding.yaml new file mode 100644 index 00000000..e244abf0 --- /dev/null +++ b/weka-operator/weka-operator/templates/auth_proxy_role_binding.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-proxy-rolebinding" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ .Values.prefix }}-proxy-role" +subjects: + - kind: ServiceAccount + name: "{{ .Values.prefix }}-controller-manager" + namespace: "{{ .Release.Namespace }}" diff --git a/weka-operator/weka-operator/templates/auth_proxy_service.yaml b/weka-operator/weka-operator/templates/auth_proxy_service.yaml new file mode 100644 index 00000000..bc728ab0 --- /dev/null +++ b/weka-operator/weka-operator/templates/auth_proxy_service.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/name: service + app.kubernetes.io/part-of: weka-operator + control-plane: controller-manager + name: "{{ .Values.prefix }}-controller-manager-metrics-service" +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: 
controller-manager diff --git a/weka-operator/weka-operator/templates/client_editor_role.yaml b/weka-operator/weka-operator/templates/client_editor_role.yaml new file mode 100644 index 00000000..010b3b41 --- /dev/null +++ b/weka-operator/weka-operator/templates/client_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit clients. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: client-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/part-of: weka-operator + app.kubernetes.io/managed-by: kustomize + name: client-editor-role +rules: +- apiGroups: + - weka.weka.io + resources: + - clients + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - weka.weka.io + resources: + - clients/status + verbs: + - get diff --git a/weka-operator/weka-operator/templates/client_viewer_role.yaml b/weka-operator/weka-operator/templates/client_viewer_role.yaml new file mode 100644 index 00000000..1aa4e5ff --- /dev/null +++ b/weka-operator/weka-operator/templates/client_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view clients. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: client-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/part-of: weka-operator + app.kubernetes.io/managed-by: kustomize + name: client-viewer-role +rules: +- apiGroups: + - weka.weka.io + resources: + - clients + verbs: + - get + - list + - watch +- apiGroups: + - weka.weka.io + resources: + - clients/status + verbs: + - get diff --git a/weka-operator/weka-operator/templates/leader_election_role.yaml b/weka-operator/weka-operator/templates/leader_election_role.yaml new file mode 100644 index 00000000..ec6346b0 --- /dev/null +++ b/weka-operator/weka-operator/templates/leader_election_role.yaml @@ -0,0 +1,44 @@ +--- +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/name: role + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-leader-election-role" +rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/weka-operator/weka-operator/templates/leader_election_role_binding.yaml b/weka-operator/weka-operator/templates/leader_election_role_binding.yaml new file mode 100644 index 00000000..5d947130 --- /dev/null +++ b/weka-operator/weka-operator/templates/leader_election_role_binding.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: 
rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/name: rolebinding + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-leader-election-rolebinding" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ .Values.prefix }}-leader-election-role" +subjects: + - kind: ServiceAccount + name: "{{ .Values.prefix }}-controller-manager" + namespace: "{{ .Release.Namespace }}" diff --git a/weka-operator/weka-operator/templates/maintenance_service_account.yaml b/weka-operator/weka-operator/templates/maintenance_service_account.yaml new file mode 100644 index 00000000..adc50d2c --- /dev/null +++ b/weka-operator/weka-operator/templates/maintenance_service_account.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-maintenance" diff --git a/weka-operator/weka-operator/templates/manager.yaml b/weka-operator/weka-operator/templates/manager.yaml new file mode 100644 index 00000000..51aaae22 --- /dev/null +++ b/weka-operator/weka-operator/templates/manager.yaml @@ -0,0 +1,247 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: "{{ .Values.prefix }}-controller-manager" + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/part-of: weka-operator + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: {{ if eq .Values.deployController true }} 1 {{else}} 0 {{end}} + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + prometheus.io/port: "8080" + 
prometheus.io/scrape: "true" + prometheus.io/path: "/metrics" + labels: + control-plane: controller-manager + app.kubernetes.io/component: weka-operator + app.kubernetes.io/created-by: weka-operator + app: weka-operator + {{- if .Values.manager.labels }} + {{- toYaml .Values.manager.labels | nindent 8 }} + {{- end }} + spec: + securityContext: + runAsNonRoot: false + imagePullSecrets: + - name: "{{ .Values.imagePullSecret }}" + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: kubernetes.io/os + operator: In + values: + - linux + tolerations: + {{- if .Values.manager.tolerations }} + {{- toYaml .Values.manager.tolerations | nindent 8 }} + {{- else if .Values.tolerations }} + {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.manager.nodeSelector }} + nodeSelector: + {{- toYaml .Values.manager.nodeSelector | nindent 8 }} + {{- end }} + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ .k8sNetwork | default "" }} + {{- end }} + volumes: + - name: tmpdir + emptyDir: { } + containers: + - args: + - --secure-listen-address=0.0.0.0:8443 + - --upstream=http://127.0.0.1:8080/ + - --logtostderr=true + - --v=0 + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1 + name: kube-rbac-proxy + ports: + - containerPort: 8443 + name: https + protocol: TCP + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + - command: + - /weka-operator + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OTEL_DEPLOYMENT_IDENTIFIER + value: "{{ .Values.deploymentIdentifier }}" 
+ - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "{{ .Values.otelExporterOtlpEndpoint }}" + - name: VERSION + value: "{{ .Values.Version }}" + - name: WEKA_OPERATOR_WEKA_HOME_ENDPOINT + value: "{{ .Values.wekahome.endpoint }}" + - name: WEKA_OPERATOR_WEKA_HOME_CACERT_SECRET + value: "{{ .Values.wekahome.cacertSecret }}" + - name: WEKA_OPERATOR_WEKA_HOME_INSECURE + value: "{{ .Values.wekahome.allowInsecureTLS }}" + - name: WEKA_OPERATOR_WEKA_HOME_ENABLE_STATS + value: "{{ .Values.wekahome.enableStats }}" + - name: WEKA_OPERATOR_DEBUG_SLEEP + value: "{{ .Values.debugSleep }}" + - name: WEKA_OPERATOR_MAINTENANCE_SA_NAME + value: "{{ .Values.prefix }}-maintenance" + - name: WEKA_MAINTENANCE_IMAGE + value: "{{ .Values.maintenanceImage | default "quay.io/weka.io/busybox" }}" + - name: WEKA_MAINTENANCE_IMAGE_PULL_SECRET + value: "{{ .Values.maintenanceImagePullSecret | default "" }}" + - name: WEKA_OCP_PULL_SECRET + value: "{{ .Values.ocpCompatibility.driverToolkitSecretName }}" + - name: WEKA_OCP_TOOLKIT_IMAGE_BASE_URL + value: "{{ .Values.ocpCompatibility.driverToolkitImageBaseUrl }}" + - name: WEKA_COS_ALLOW_DISABLE_DRIVER_SIGNING + value: "{{ .Values.gkeCompatibility.disableDriverSigning }}" + - name: WEKA_COS_ALLOW_HUGEPAGE_CONFIG + value: "{{ .Values.gkeCompatibility.hugepageConfiguration.enabled}}" + - name: WEKA_COS_GLOBAL_HUGEPAGE_SIZE + value: "{{ .Values.gkeCompatibility.hugepageConfiguration.hugepageSize }}" + - name: WEKA_COS_GLOBAL_HUGEPAGE_COUNT + value: "{{ .Values.gkeCompatibility.hugepageConfiguration.hugepageCount }}" + - name: WEKA_COS_SERVICE_ACCOUNT_SECRET + value: "{{ .Values.gkeCompatibility.gkeServiceAccountSecret }}" + - name: SKIP_UNHEALTHY_TOLERATION + value: "{{ .Values.skipUnhealthyToleration }}" + - name: CLEANUP_REMOVED_NODES + value: "{{ .Values.cleanupRemovedNodes }}" + - name: CLEANUP_BACKENDS_ON_NODE_SELECTOR_MISMATCH + value: "{{ .Values.cleanupOnNodeSelectorMismatch }}" + - name: CLEANUP_CLIENTS_ON_NODE_SELECTOR_MISMATCH + value: "{{ 
.Values.cleanupClientsOnNodeSelectorMismatch }}" + - name: CLEANUP_CONTAINERS_ON_TOLERATIONS_MISMATCH + value: "{{ .Values.cleanupContainersOnTolerationsMismatch }}" + - name: LOG_LEVEL + value: "{{ .Values.logging.level }}" + - name: LOG_TIME_ONLY + value: "{{ .Values.logging.timeOnly }}" + - name: HEALTH_PROBE_BIND_ADDRESS + value: "{{ .Values.healthProbeBindAddress }}" + - name: OPERATOR_METRICS_BIND_ADDRESS + value: "{{ .Values.operatorMetricsBindAddress }}" + - name: ENABLE_LEADER_ELECTION + value: "{{ .Values.enableLeaderElection }}" + - name: ENABLE_CLUSTER_API + value: "{{ .Values.enableClusterApi }}" + - name: RECONCILE_TIMEOUT + value: "{{ .Values.reconcileTimeout }}" + - name: KUBE_EXEC_TIMEOUT + value: "{{ .Values.kubeExecTimeout }}" + - name: WEKA_ALLOC_ZOMBIE_DELETE_AFTER + value: "{{ .Values.wekaAllocZombieDeleteAfter }}" + - name: MAX_WORKERS_WEKACLUSTER + value: "{{ .Values.maxWorkers.wekaCluster }}" + - name: MAX_WORKERS_WEKACONTAINER + value: "{{ .Values.maxWorkers.wekaContainer }}" + - name: MAX_WORKERS_WEKACLIENT + value: "{{ .Values.maxWorkers.wekaClient }}" + - name: MAX_WORKERS_WEKAMANUALOPERATION + value: "{{ .Values.maxWorkers.wekaManualOperation }}" + - name: MAX_WORKERS_WEKAPOLICY + value: "{{ .Values.maxWorkers.wekaPolicy }}" + - name: METRICS_CLUSTERS_ENABLED + value: "{{ .Values.metrics.clusters.enabled }}" + - name: METRICS_CLUSTERS_POLLING_RATE + value: "{{ .Values.metrics.clusters.pollingRate }}" + - name: METRICS_CONTAINERS_ENABLED + value: "{{ .Values.metrics.containers.enabled }}" + - name: METRICS_CONTAINERS_POLLING_RATE + value: "{{ .Values.metrics.containers.pollingRate }}" + - name: METRICS_CONTAINERS_REQUEST_TIMEOUT_REGISTER + value: "{{ .Values.metrics.containers.requestsTimeouts.register }}" + - name: METRICS_CONTAINERS_REQUEST_TIMEOUT_GET_CONTAINER_INFO + value: "{{ .Values.metrics.containers.requestsTimeouts.getContainerInfo }}" + - name: LOCAL_DATA_PVC + value: "{{ .Values.localDataPvc }}" + - name: 
DNS_POLICY_K8S_NETWORK + value: "{{ .Values.dnsPolicy.k8sNetwork }}" + - name: DNS_POLICY_HOST_NETWORK + value: "{{ .Values.dnsPolicy.hostNetwork }}" + - name: SIGN_DRIVES_IMAGE + value: "{{ .Values.signDrivesImage }}" + - name: UPGRADE_COMPUTE_THRESHOLD_PERCENT + value: "{{ .Values.upgrade.computeThresholdPercent }}" + - name: UPGRADE_DRIVE_THRESHOLD_PERCENT + value: "{{ .Values.upgrade.driveThresholdPercent }}" + - name: UPGRADE_MAX_DEACTIVATING_CONTAINERS_PERCENT + value: "{{ .Values.upgrade.maxDeactivatingContainersPercent }}" + - name: SKIP_CLIENT_NO_SCHEDULE_TOLERATION + value: "{{ .Values.skipClientNoScheduleToleration }}" + - name: SKIP_AUX_NO_SCHEDULE_TOLERATION + value: "{{ .Values.skipAuxNoScheduleToleration }}" + - name: EVICT_CONTAINER_ON_DELETION + value: "{{ .Values.evictContainerOnDeletion }}" + - name: SKIP_CLIENTS_TOLERATION_VALIDATION + value: "{{ .Values.skipClientsTolerationValidation }}" + + + image: "{{ .Values.image.repository }}:{{if not (empty .Values.image.tag)}}{{ .Values.image.tag }}{{else}}{{ .Chart.Version }}{{end}}" + imagePullPolicy: Always + volumeMounts: + - mountPath: /tmp + name: tmpdir + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + {{- toYaml .Values.manager.resources | nindent 12 }} + serviceAccountName: "{{ .Values.prefix }}-controller-manager" + terminationGracePeriodSeconds: 10 diff --git a/weka-operator/weka-operator/templates/metrics_daemonset.yaml b/weka-operator/weka-operator/templates/metrics_daemonset.yaml new file mode 100644 index 00000000..9715752f --- /dev/null +++ b/weka-operator/weka-operator/templates/metrics_daemonset.yaml @@ -0,0 +1,121 @@ +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: "{{ .Values.prefix }}-node-agent" + labels: + app.kubernetes.io/component: weka-node-agent + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: weka-node-agent + app.kubernetes.io/name: weka-node-agent + app.kubernetes.io/part-of: weka-operator + control-plane: node-agent +spec: + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 100% + maxUnavailable: 0 + selector: + matchLabels: + control-plane: weka-node-agent + app.kubernetes.io/component: weka-node-agent + template: + metadata: + annotations: + "prometheus.io/scrape": "true" + "prometheus.io/port": "8090" + "prometheus.io/path": "/metrics" + "kubectl.kubernetes.io/default-container": node-agent + labels: + control-plane: weka-node-agent + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/component: weka-node-agent + app: weka-node-agent + spec: + securityContext: + runAsNonRoot: false + imagePullSecrets: + - name: "{{ .Values.imagePullSecret }}" + {{ if .Values.nodeAgent.nodeSelector }} + nodeSelector: + {{- toYaml .Values.nodeAgent.nodeSelector | nindent 8 }} + {{- end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - key: kubernetes.io/os + operator: In + values: + - linux + tolerations: + {{- if 
.Values.nodeAgent.tolerations }} + {{- toYaml .Values.nodeAgent.tolerations | nindent 8 }} + {{- end }} + {{- with .Values.dnsPolicy }} + dnsPolicy: {{ .k8sNetwork | default "" }} + {{- end }} + volumes: + - name: tmpdir + emptyDir: { } + - name: token + secret: + secretName: "weka-node-agent-secret" + - name: weka-persistence + hostPath: + path: {{ .Values.nodeAgent.persistencePaths }}/shared + + containers: + - command: + - /weka-operator + ports: + - containerPort: 8090 + name: http + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_UID + valueFrom: + fieldRef: + fieldPath: metadata.uid + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: OTEL_DEPLOYMENT_IDENTIFIER + value: "{{ .Values.deploymentIdentifier }}" + - name: OTEL_EXPORTER_OTLP_ENDPOINT + value: "{{ .Values.otelExporterOtlpEndpoint }}" + - name: VERSION + value: "{{ .Values.Version }}" + - name: OPERATOR_MODE + value: node-agent + - name: NODE_AGENT_BIND_ADDRESS + value: ":8090" + + image: "{{ .Values.image.repository }}:{{if not (empty .Values.image.tag)}}{{ .Values.image.tag }}{{else}}{{ .Chart.Version }}{{end}}" + imagePullPolicy: Always + volumeMounts: + - mountPath: /tmp + name: tmpdir + - mountPath: /host-binds/shared + name: weka-persistence + - mountPath: /var/run/secrets/kubernetes.io/token + name: token + readOnly: true + name: node-agent + securityContext: + privileged: true + resources: + {{- toYaml .Values.nodeAgent.resources | nindent 12 }} + serviceAccountName: "{{ .Values.prefix }}-maintenance" + terminationGracePeriodSeconds: 10 diff --git a/weka-operator/weka-operator/templates/node_describe_role.yaml b/weka-operator/weka-operator/templates/node_describe_role.yaml new file mode 100644 index 00000000..5c3283e8 --- /dev/null +++ b/weka-operator/weka-operator/templates/node_describe_role.yaml @@ -0,0 +1,15 @@ +# cluster role that allowed to perform describe ("get") over ["nodes"] +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/name: clusterrole + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-node-describe-role" +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] diff --git a/weka-operator/weka-operator/templates/ocp_maintenance_security_context_constraint.yaml b/weka-operator/weka-operator/templates/ocp_maintenance_security_context_constraint.yaml new file mode 100644 index 00000000..41835fee --- /dev/null +++ b/weka-operator/weka-operator/templates/ocp_maintenance_security_context_constraint.yaml @@ -0,0 +1,23 @@ +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ .Values.prefix }}-maintenance-scc +allowPrivilegedContainer: true +allowHostDirVolumePlugin: true +volumes: + - hostPath + - secret +readOnlyRootFilesystem: false +allowHostPorts: true +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.prefix }}-maintenance +{{- end }} diff --git a/weka-operator/weka-operator/templates/ocp_node_tuning.yaml b/weka-operator/weka-operator/templates/ocp_node_tuning.yaml new file mode 100644 index 00000000..77559d5d --- /dev/null +++ b/weka-operator/weka-operator/templates/ocp_node_tuning.yaml @@ -0,0 +1,28 @@ +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} +{{- if .Values.ocpCompatibility.hugepageConfiguration.enabled }} +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfig +metadata: + labels: + machineconfiguration.openshift.io/role: {{ .Values.ocpCompatibility.hugepageConfiguration.machineConfigNodeLabel }} + name:
worker-hugepages-settings +spec: + kernelArguments: + - hugepagesz={{.Values.ocpCompatibility.hugepageConfiguration.hugepageSize }} hugepages={{ .Values.ocpCompatibility.hugepageConfiguration.hugepagesCount }} + +--- + +apiVersion: machineconfiguration.openshift.io/v1 +kind: MachineConfigPool +metadata: + name: hugepages +spec: + machineConfigSelector: + matchExpressions: + - {key: machineconfiguration.openshift.io/role, operator: In, values: [{{ .Values.ocpCompatibility.hugepageConfiguration.machineConfigNodeLabel }}]} + {{- if .Values.ocpCompatibility.hugepageConfiguration.nodeSelector }} + nodeSelector: + {{- .Values.ocpCompatibility.hugepageConfiguration.nodeSelector | toYaml | nindent 4 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/weka-operator/weka-operator/templates/ocp_security_context_constraint.yaml b/weka-operator/weka-operator/templates/ocp_security_context_constraint.yaml new file mode 100644 index 00000000..07047d1d --- /dev/null +++ b/weka-operator/weka-operator/templates/ocp_security_context_constraint.yaml @@ -0,0 +1,24 @@ +{{- if .Capabilities.APIVersions.Has "security.openshift.io/v1/SecurityContextConstraints" }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: {{ .Values.prefix }}-controller-manager-scc + +allowPrivilegedContainer: true +allowHostDirVolumePlugin: true +volumes: + - hostPath + - secret +readOnlyRootFilesystem: false +allowHostPorts: true +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +users: + - system:serviceaccount:{{ .Release.Namespace }}:{{ .Values.prefix }}-controller-manager +{{- end }} diff --git a/weka-operator/weka-operator/templates/ocp_versions_configmap.yaml b/weka-operator/weka-operator/templates/ocp_versions_configmap.yaml new file mode 100644 index 00000000..d26bbb5b --- /dev/null +++ b/weka-operator/weka-operator/templates/ocp_versions_configmap.yaml @@ -0,0 +1,26 @@
+apiVersion: v1 +kind: ConfigMap +metadata: + name: ocp-driver-toolkit-images +data: + 414.92.202404162000-0: "259cbd840454c6d9030c21a5d24be0599abc4941cdd525a80f6eeb5d67e7908c" + 414.92.202404231906-0: "20dce872df7c233a34179b4356acc1c6cbd80c56cff053fa437de3b6595f9710" + 414.92.202404301839-0: "20dce872df7c233a34179b4356acc1c6cbd80c56cff053fa437de3b6595f9710" + 415.92.202402201450-0: "cd0ea5d8ec43c5b03bf362e0b595bafe3e97e222d4344a851453ebe8770df135" + 415.92.202403061641-0: "9d974e00ebe924fbd03abf03c55d873108a1593b5a5e60f0daf4b867fc5bb1b1" + 415.92.202403080220-0: "9d974e00ebe924fbd03abf03c55d873108a1593b5a5e60f0daf4b867fc5bb1b1" + 415.92.202403191241-0: "abbff60a77f7ac2276dbeef33fb46ed32c9b9eb1c5813260c6383605bed76a08" + 415.92.202403270524-0: "b9cd86347ba410c90b4a34fe9c1b25951e0f0cd38ceca1d3ccd4bae96f084edb" + 415.92.202404161628-0: "be818782c507d76b48f9f37bcf85e5d5311514ff9e6108b047f80bf6331e63f5" + 415.92.202404251009-0: "bae8035c05d095e84c62efcab6202330a98493cab03e091c81a0b792afb5672c" + 415.92.202404302054-0: "bae8035c05d095e84c62efcab6202330a98493cab03e091c81a0b792afb5672c" + 415.92.202405070140-0: "985b72435a7091702a520581eb51ebd439bfe6ff39c33ffaaad7e30b9e321454" + 415.92.202405130844-0: "985b72435a7091702a520581eb51ebd439bfe6ff39c33ffaaad7e30b9e321454" + 415.92.202405201956-0: "934af754e2fbc8ed5deb7c4b22299c6c7b4504e6d8d9fd50fc3ad374616d70a9" + 415.92.202405281402-0: "d493e0bd8959e0d117493453db9c183e8bca34f73eb89b091134a087554fa0e8" + 415.92.202406041802-0: "9d2c61bf746c966f71bc6c6a3797303a7d3bfaef09040dfde85f19383d19681b" + 415.92.202406111137-0: "efa99ae171e7db22aa2d320b7bc78e950db01987889b6a8529e1945670e80792" + 416.94.202406172220-0: "dde3cd6a75d865a476aa7e1cab6fa8d97742401e87e0d514f3042c3a881e301f" + 416.94.202406251923-0: "8ef92caba7bd5d6ab3a139da782bf5651c2a40802eaa33b0c7899a7e897e007b" + 416.94.202407030122-0: "e5e6de7572003ac560f113a0082594a585c49d51801f028f699b15262eff7c02" + 416.94.202407081958-0: 
"a73204d0c03454b02656801ca4c49cb2d8b0d54645bb90f74622df335c82dce1" \ No newline at end of file diff --git a/weka-operator/weka-operator/templates/podmonitor.yaml b/weka-operator/weka-operator/templates/podmonitor.yaml new file mode 100644 index 00000000..2e5e30a2 --- /dev/null +++ b/weka-operator/weka-operator/templates/podmonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.podMonitor.enabled (.Capabilities.APIVersions.Has "monitoring.coreos.com/v1") }} +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: weka-node-monitor +spec: + namespaceSelector: + any: true + selector: + matchLabels: + app: weka-node-agent + podMetricsEndpoints: + - targetPort: 8090 + path: /metrics + interval: 30s + scrapeTimeout: 30s +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: weka-cluster-monitor +spec: + namespaceSelector: + any: true + selector: + matchLabels: + app: weka-cluster-monitoring + podMetricsEndpoints: + - targetPort: 80 + path: /metrics + interval: 30s + scrapeTimeout: 30s +{{- end }} \ No newline at end of file diff --git a/weka-operator/weka-operator/templates/role.yaml b/weka-operator/weka-operator/templates/role.yaml new file mode 100644 index 00000000..ab3be5e6 --- /dev/null +++ b/weka-operator/weka-operator/templates/role.yaml @@ -0,0 +1,140 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: weka-operator-manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - create + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - get + - patch + - update +- apiGroups: + - "" + resources: + - pods + - services + verbs: + - create + - delete + - get 
+ - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - pods/exec + verbs: + - create +- apiGroups: + - apps + resources: + - daemonsets + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - batch + resources: + - jobs + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - metrics.k8s.io + resources: + - pods + verbs: + - get +- apiGroups: + - weka.weka.io + resources: + - driveclaims + - wekaclients + - wekaclusters + - wekacontainers + - wekamanualoperations + - wekapolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - weka.weka.io + resources: + - driveclaims/finalizers + - wekaclients/finalizers + - wekaclusters/finalizers + - wekacontainers/finalizers + - wekamanualoperations/finalizers + - wekapolicies/finalizers + verbs: + - update +- apiGroups: + - weka.weka.io + resources: + - driveclaims/status + - wekaclients/status + - wekaclusters/status + - wekacontainers/status + - wekamanualoperations/status + - wekapolicies/status + verbs: + - get + - patch + - update diff --git a/weka-operator/weka-operator/templates/role_binding.yaml b/weka-operator/weka-operator/templates/role_binding.yaml new file mode 100644 index 00000000..5c311009 --- /dev/null +++ b/weka-operator/weka-operator/templates/role_binding.yaml @@ -0,0 +1,19 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-manager-rolebinding" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ .Values.prefix }}-manager-role" +subjects: + - kind: ServiceAccount + name: "{{ .Values.prefix }}-controller-manager" + namespace: "{{ 
.Release.Namespace }}" diff --git a/weka-operator/weka-operator/templates/service_account.yaml b/weka-operator/weka-operator/templates/service_account.yaml new file mode 100644 index 00000000..fda9e5d6 --- /dev/null +++ b/weka-operator/weka-operator/templates/service_account.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: weka-operator + app.kubernetes.io/part-of: weka-operator + name: "{{ .Values.prefix }}-controller-manager" diff --git a/weka-operator/weka-operator/templates/weka_boot_scripts_configmap.yaml b/weka-operator/weka-operator/templates/weka_boot_scripts_configmap.yaml new file mode 100644 index 00000000..78bc898d --- /dev/null +++ b/weka-operator/weka-operator/templates/weka_boot_scripts_configmap.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: weka-boot-scripts +data: + syslog-ng.conf: | +{{ .Files.Get "resources/syslog-ng.conf" | indent 4 }} + run-weka-cli.sh: | +{{ .Files.Get "resources/run-weka-cli.sh" | indent 4 }} + weka_runtime.py: | +{{ .Files.Get "resources/weka_runtime.py" | indent 4 }} + devenv.sh: | +{{ .Files.Get "resources/cos-devenv.sh" | indent 4 }} diff --git a/weka-operator/weka-operator/values.yaml b/weka-operator/weka-operator/values.yaml new file mode 100644 index 00000000..b7f0abc8 --- /dev/null +++ b/weka-operator/weka-operator/values.yaml @@ -0,0 +1,166 @@ +--- +# Default values for weka-operator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Prefix for all resources created by the operator +prefix: "weka-operator" + +image: + repository: quay.io/weka.io/weka-operator + # Override only, default equals to the chart version + tag: "" + +imagePullSecret: quay-io-robot-secret +deployController: true +# -- experimental: enables custom HTTP API endpoint as alternative to K8s API +enableClusterApi: false +# -- enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. +enableLeaderElection: true +# -- custom endpoint to send OTLP traces to (insecure, GRPC). +# If left empty, traces are sent to stdout in json format. +otelExporterOtlpEndpoint: "" +# -- version of weka-operator +version: "" +# logging settings +logging: + level: 0 + timeOnly: true +# -- can be overridden with any other basic linux image +maintenanceImage: quay.io/weka.io/busybox +# -- if required, you can specify a pull secret for that image +# maintenanceImagePullSecret: "" +# -- used by various operations that are not in context of wekacluster/wekacontainer, for example: GC of persistent storage/removal of tombstones +wekahome: + endpoint: https://api.home.weka.io + # -- allow insecure TLS connection to the wekahome endpoint (no server certificate validation) + allowInsecureTLS: false + # -- name of the secret specifying CA certificate chain.
It is assumed that every target namespace will have such secret + cacertSecret: "" + enableStats: true + +reconcileTimeout: 30m +kubeExecTimeout: 5m + +# -- maximum number of workers for each reconciler (for each resource type) +maxWorkers: + wekaCluster: 5 + wekaContainer: 50 + wekaClient: 5 + wekaManualOperation: 5 + wekaPolicy: 5 + +wekaAllocZombieDeleteAfter: 5m + +operatorMetricsBindAddress: "127.0.0.1:8080" +nodeAgentMetricsBindAddress: ":8090" +healthProbeBindAddress: ":8081" + +ocpCompatibility: + driverToolkitSecretName: + driverToolkitImageBaseUrl: quay.io/openshift-release-dev/ocp-v4.0-art-dev + # -- automatically create Tuned profile for the cluster nodes to apply hugepages configuration. + # -- WARNING: do not enable this feature if you are already using custom Tuned profiles for setting hugepages + hugepageConfiguration: + # -- if enabled, the operator will configure hugepages for the OCP nodes + enabled: false + # -- on which nodes to configure hugepages + machineConfigNodeLabel: "worker" + # -- hugepage size + hugepageSize: "2M" + # -- hugepage count + hugepagesCount: 4000 + nodeSelector: + matchLabels: + node-role.kubernetes.io/worker: "" + +gkeCompatibility: + gkeServiceAccountSecret: "" + # -- automatically configure hugepages for the GKE nodes + hugepageConfiguration: + # -- if enabled, the operator will configure hugepages for the GKE nodes (WARNING: will reboot nodes forcefully!) + enabled: false + # -- hugepage size + hugepageSize: "2M" + # -- hugepage count + hugepageCount: 4000 + # -- if enabled, the operator will disable driver signing enforcement on the GKE nodes (WARNING: will reboot nodes forcefully!)
+ disableDriverSigning: false + +manager: + resources: + limits: + cpu: 1000m + memory: 4096Mi + requests: + cpu: 250m + memory: 64Mi + nodeSelector: {} + tolerations: [] + labels: {} + +debugSleep: 3 + +tolerations: [] +# -- if true, pods will not tolerate unhealthy nodes (unschedulable and not-ready) +skipUnhealthyToleration: false +# -- if true, client pods will not tolerate NoSchedule taints +skipClientNoScheduleToleration: false +# -- if true, auxiliary pods (discovery, sign-drives, etc.) will not tolerate NoSchedule taints +skipAuxNoScheduleToleration: false +deploymentIdentifier: "" + +# -- if true, the operator will consider nodes that are gone as totally gone (not in the cluster anymore) +# the operator will delete backend wekacontainers from the nodes that are not part of the k8s cluster anymore +cleanupRemovedNodes: false +# -- if true, the operator will delete backend wekacontainers from the nodes that do not match node selectors +cleanupOnNodeSelectorMismatch: false +# -- if true, the operator will delete client wekacontainers from the nodes that do not match node selectors +cleanupClientsOnNodeSelectorMismatch: false +# -- if true, the operator will delete any wekacontainers from the nodes that do not tolerate taints +cleanupContainersOnTolerationsMismatch: false +# -- if true, the operator will enforce deactivation on pod removal +evictContainerOnDeletion: false +skipClientsTolerationValidation: false + +signDrivesImage: quay.io/weka.io/weka-sign-tool:v0.1.1-pciutils + +nodeAgent: + resources: + limits: + cpu: 1000m + memory: 1024Mi + requests: + cpu: 50m + memory: 64Mi + tolerations: [] + nodeSelector: {} + # default set for generic OSes, can be overridden /root/k8s-weka for openshift, /mnt/stateful_partition/k8s-weka for google container OS + persistencePaths: /opt/k8s-weka + +metrics: + clusters: + enabled: true + pollingRate: 60s + containers: + enabled: true + pollingRate: 60s + requestsTimeouts: + register: 3s + getContainerInfo: 10s + 
+localDataStorageClass: "" +localDataPvc: "" +upgrade: + computeThresholdPercent: 90 + driveThresholdPercent: 90 + maxDeactivatingContainersPercent: 10 +podMonitor: + enabled: true + +# DNS policy configuration +dnsPolicy: + # dns policy for the pods without hostNetwork: true + k8sNetwork: "" + # dns policy for the pods with hostNetwork: true + hostNetwork: "" diff --git a/weka-operator/wekacontainer.yaml b/weka-operator/wekacontainer.yaml new file mode 100644 index 00000000..ef6792fd --- /dev/null +++ b/weka-operator/wekacontainer.yaml @@ -0,0 +1,18 @@ +--- +apiVersion: weka.weka.io/v1alpha1 +kind: WekaContainer +metadata: + name: weka-driver-builder + namespace: weka-operator-system + labels: + app: weka-driver-builder +spec: + agentPort: 60001 + nodeSelector: + weka.io/role: builder + image: quay.io/weka.io/weka-in-container:4.4.5.128-k8s + imagePullSecret: "weka-quayio-creds" + mode: "dist" + name: dist + numCores: 1 + port: 60002 From f74644c1e99a81efd8aae85ffe5ea207d53f8894 Mon Sep 17 00:00:00 2001 From: Justin Riley Date: Wed, 22 Oct 2025 10:14:49 -0400 Subject: [PATCH 5/6] install csi driver from helm/operator --- weka-operator/run.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/weka-operator/run.sh b/weka-operator/run.sh index a8ebea5c..b9a299f6 100644 --- a/weka-operator/run.sh +++ b/weka-operator/run.sh @@ -24,4 +24,5 @@ helm upgrade --install weka-operator oci://quay.io/weka.io/helm/weka-operator \ --version v1.7.0 \ --set nodeAgent.persistencePaths=/root/k8s-weka \ --set ocpCompatibility.hugepageConfiguration.enabled=true \ - --set ocpCompatibility.hugepageConfiguration.hugepagesCount=4000 + --set ocpCompatibility.hugepageConfiguration.hugepagesCount=4000 \ + --set csi.installationEnabled=true From a450a81f6e84c21a2986525bfcd2f1dd3fa80e11 Mon Sep 17 00:00:00 2001 From: Justin Riley Date: Wed, 22 Oct 2025 10:15:19 -0400 Subject: [PATCH 6/6] WIP: convert helm chart to kustomize --- weka-operator/base/Makefile | 8 ++++++++ 
weka-operator/base/src/kustomization.yaml | 12 ++++++++++++ weka-operator/pvc.yaml | 18 ++++++++++++++++++ 3 files changed, 38 insertions(+) create mode 100644 weka-operator/base/Makefile create mode 100644 weka-operator/base/src/kustomization.yaml create mode 100644 weka-operator/pvc.yaml diff --git a/weka-operator/base/Makefile b/weka-operator/base/Makefile new file mode 100644 index 00000000..c908a3f8 --- /dev/null +++ b/weka-operator/base/Makefile @@ -0,0 +1,8 @@ +all: weka-operator.yaml + +# Generate manifests from the helm chart using `kustomize build --enable-helm`. +weka-operator.yaml: $(wildcard src/*) + kustomize build --enable-helm src > $@ || { rm -f $@; exit 1; } + +clean: + rm -f weka-operator.yaml diff --git a/weka-operator/base/src/kustomization.yaml b/weka-operator/base/src/kustomization.yaml new file mode 100644 index 00000000..20140024 --- /dev/null +++ b/weka-operator/base/src/kustomization.yaml @@ -0,0 +1,12 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: weka-operator + +helmCharts: + - name: weka-operator + namespace: weka-operator + repo: oci://quay.io/weka.io/helm/weka-operator + version: v1.7.0 + includeCRDs: true + releaseName: weka-operator + valuesFile: values.yaml diff --git a/weka-operator/pvc.yaml b/weka-operator/pvc.yaml new file mode 100644 index 00000000..3205289e --- /dev/null +++ b/weka-operator/pvc.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + annotations: + pv.kubernetes.io/bind-completed: "yes" + pv.kubernetes.io/bound-by-controller: "yes" + volume.beta.kubernetes.io/storage-provisioner: csi.weka.io + volume.kubernetes.io/storage-provisioner: csi.weka.io + name: pvc-wekafs-dir-api2 + namespace: default +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 200Gi + storageClassName: storageclass-wekafs-dir-api + volumeName: pvc-1bdd7c1f-6229-4861-b2b3-05151c75447a