---
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  annotations:
    operator.prometheus.io/controller-id: openshift-monitoring/prometheus-operator
  creationTimestamp: "2026-02-17T12:49:38Z"
  generation: 2
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/managed-by: cluster-monitoring-operator
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: openshift-monitoring
    app.kubernetes.io/version: 3.5.0
  managedFields:
  - apiVersion: monitoring.coreos.com/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:availableReplicas: {}
        f:conditions:
          k:{"type":"Available"}:
            .: {}
            f:lastTransitionTime: {}
            f:message: {}
            f:observedGeneration: {}
            f:reason: {}
            f:status: {}
            f:type: {}
          k:{"type":"Reconciled"}:
            .: {}
            f:lastTransitionTime: {}
            f:message: {}
            f:observedGeneration: {}
            f:reason: {}
            f:status: {}
            f:type: {}
        f:paused: {}
        f:replicas: {}
        f:selector: {}
        f:shardStatuses:
          k:{"shardID":"0"}:
            .: {}
            f:availableReplicas: {}
            f:replicas: {}
            f:shardID: {}
            f:unavailableReplicas: {}
            f:updatedReplicas: {}
        f:shards: {}
        f:unavailableReplicas: {}
        f:updatedReplicas: {}
    manager: PrometheusOperator
    operation: Apply
    subresource: status
    time: "2026-02-17T12:51:59Z"
  - apiVersion: monitoring.coreos.com/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:operator.prometheus.io/controller-id: {}
        f:labels:
          .: {}
          f:app.kubernetes.io/component: {}
          f:app.kubernetes.io/instance: {}
          f:app.kubernetes.io/managed-by: {}
          f:app.kubernetes.io/name: {}
          f:app.kubernetes.io/part-of: {}
          f:app.kubernetes.io/version: {}
      f:spec:
        .: {}
        f:additionalAlertRelabelConfigs: {}
        f:additionalArgs: {}
        f:alerting:
          .: {}
          f:alertmanagers: {}
        f:arbitraryFSAccessThroughSMs: {}
        f:configMaps: {}
        f:containers: {}
        f:enableFeatures:
          .: {}
          v:"delayed-compaction": {}
        f:evaluationInterval: {}
        f:externalUrl: {}
        f:image: {}
        f:listenLocal: {}
        f:maximumStartupDurationSeconds: {}
        f:nodeSelector:
          .: {}
          f:kubernetes.io/os: {}
        f:podMetadata:
          .: {}
          f:annotations:
            .: {}
            f:openshift.io/required-scc: {}
            f:target.workload.openshift.io/management: {}
          f:labels:
            .: {}
            f:app.kubernetes.io/component: {}
            f:app.kubernetes.io/instance: {}
            f:app.kubernetes.io/name: {}
            f:app.kubernetes.io/part-of: {}
            f:app.kubernetes.io/version: {}
        f:podMonitorNamespaceSelector: {}
        f:podMonitorSelector: {}
        f:portName: {}
        f:priorityClassName: {}
        f:probeNamespaceSelector: {}
        f:probeSelector: {}
        f:replicas: {}
        f:resources:
          .: {}
          f:requests:
            .: {}
            f:cpu: {}
            f:memory: {}
        f:retention: {}
        f:ruleNamespaceSelector: {}
        f:ruleSelector: {}
        f:rules:
          .: {}
          f:alert: {}
        f:scrapeClasses:
          .: {}
          k:{"name":"tls-client-certificate-auth"}:
            .: {}
            f:name: {}
            f:tlsConfig:
              .: {}
              f:ca: {}
              f:caFile: {}
              f:cert: {}
              f:certFile: {}
              f:insecureSkipVerify: {}
              f:keyFile: {}
        f:scrapeInterval: {}
        f:secrets:
          .: {}
          v:"kube-rbac-proxy": {}
          v:"metrics-client-certs": {}
          v:"prometheus-k8s-kube-rbac-proxy-web": {}
          v:"prometheus-k8s-thanos-sidecar-tls": {}
          v:"prometheus-k8s-tls": {}
        f:securityContext:
          .: {}
          f:fsGroup: {}
          f:runAsNonRoot: {}
          f:runAsUser: {}
        f:serviceAccountName: {}
        f:serviceMonitorNamespaceSelector: {}
        f:serviceMonitorSelector: {}
        f:thanos:
          .: {}
          f:blockSize: {}
          f:image: {}
          f:resources:
            .: {}
            f:requests:
              .: {}
              f:cpu: {}
              f:memory: {}
          f:version: {}
        f:version: {}
        f:volumes: {}
        f:web:
          .: {}
          f:httpConfig:
            .: {}
            f:headers:
              .: {}
              f:contentSecurityPolicy: {}
    manager: operator
    operation: Update
    time: "2026-02-17T12:50:57Z"
  name: k8s
  namespace: openshift-monitoring
  resourceVersion: "11201"
  uid: e440c487-fda8-43fc-a979-955c964cba8f
spec:
  additionalAlertRelabelConfigs:
    key: config.yaml
    name: alert-relabel-configs
    optional: true
  additionalArgs:
  - name: scrape.timestamp-tolerance
    value: 15ms
  - name: no-auto-gomemlimit
  alerting:
    alertmanagers:
    - apiVersion: v2
      bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
      name: alertmanager-main
      namespace: openshift-monitoring
      port: web
      scheme: https
      tlsConfig:
        ca: {}
        caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
        cert: {}
        serverName: alertmanager-main.openshift-monitoring.svc
  arbitraryFSAccessThroughSMs: {}
  configMaps:
  - serving-certs-ca-bundle
  - kubelet-serving-ca-bundle
  - metrics-client-ca
  containers:
  - args:
    - --secure-listen-address=0.0.0.0:9091
    - --upstream=http://127.0.0.1:9090
    - --config-file=/etc/kube-rbac-proxy/config.yaml
    - --tls-cert-file=/etc/tls/private/tls.crt
    - --tls-private-key-file=/etc/tls/private/tls.key
    - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
    - --ignore-paths=/-/healthy,/-/ready
    - --tls-min-version=VersionTLS12
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:890bcb4e15a58b5fb9ee5641042ea42e78505f18f95d019e6d6582994151027e
    name: kube-rbac-proxy-web
    ports:
    - containerPort: 9091
      name: web
      protocol: TCP
    resources:
      requests:
        cpu: 1m
        memory: 15Mi
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/tls/private
      name: secret-prometheus-k8s-tls
    - mountPath: /etc/kube-rbac-proxy
      name: secret-prometheus-k8s-kube-rbac-proxy-web
  - args:
    - --secure-listen-address=0.0.0.0:9092
    - --upstream=http://127.0.0.1:9090
    - --allow-paths=/metrics,/federate
    - --config-file=/etc/kube-rbac-proxy/config.yaml
    - --tls-cert-file=/etc/tls/private/tls.crt
    - --tls-private-key-file=/etc/tls/private/tls.key
    - --client-ca-file=/etc/tls/client/client-ca.crt
    - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
    - --tls-min-version=VersionTLS12
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:890bcb4e15a58b5fb9ee5641042ea42e78505f18f95d019e6d6582994151027e
    name: kube-rbac-proxy
    ports:
    - containerPort: 9092
      name: metrics
      protocol: TCP
    resources:
      requests:
        cpu: 1m
        memory: 15Mi
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/tls/private
      name: secret-prometheus-k8s-tls
    - mountPath: /etc/tls/client
      name: configmap-metrics-client-ca
      readOnly: true
    - mountPath: /etc/kube-rbac-proxy
      name: secret-kube-rbac-proxy
  - args:
    - --secure-listen-address=[$(POD_IP)]:10903
    - --upstream=http://127.0.0.1:10902
    - --tls-cert-file=/etc/tls/private/tls.crt
    - --tls-private-key-file=/etc/tls/private/tls.key
    - --client-ca-file=/etc/tls/client/client-ca.crt
    - --config-file=/etc/kube-rbac-proxy/config.yaml
    - --tls-cipher-suites=TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
    - --allow-paths=/metrics
    - --tls-min-version=VersionTLS12
    env:
    - name: POD_IP
      valueFrom:
        fieldRef:
          fieldPath: status.podIP
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:890bcb4e15a58b5fb9ee5641042ea42e78505f18f95d019e6d6582994151027e
    name: kube-rbac-proxy-thanos
    ports:
    - containerPort: 10903
      name: thanos-proxy
      protocol: TCP
    resources:
      requests:
        cpu: 1m
        memory: 10Mi
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/tls/private
      name: secret-prometheus-k8s-thanos-sidecar-tls
      readOnly: true
    - mountPath: /etc/kube-rbac-proxy
      name: secret-kube-rbac-proxy
      readOnly: true
    - mountPath: /etc/tls/client
      name: configmap-metrics-client-ca
      readOnly: true
  - args:
    - sidecar
    - --prometheus.url=http://localhost:9090/
    - --tsdb.path=/prometheus
    - --http-address=127.0.0.1:10902
    - --grpc-server-tls-cert=/etc/tls/grpc/server.crt
    - --grpc-server-tls-key=/etc/tls/grpc/server.key
    - --grpc-server-tls-client-ca=/etc/tls/grpc/ca.crt
    name: thanos-sidecar
    resources:
      requests:
        cpu: 1m
        memory: 25Mi
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/tls/grpc
      name: secret-grpc-tls
  - env:
    - name: HTTP_PROXY
    - name: HTTPS_PROXY
    - name: NO_PROXY
    - name: GOGC
      value: "100"
    name: prometheus
    resources: {}
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /etc/pki/ca-trust/extracted/pem/
      name: prometheus-trusted-ca-bundle
  enableFeatures:
  - delayed-compaction
  evaluationInterval: 30s
  externalUrl: https://console-openshift-console.apps.d48236af-1db7-45b0-a057-4fa21fadfc2f.prod.konfluxeaas.com/monitoring
  image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33f676539c6617ae96bc20e0fa096a2ea77a1d1470b69c613d48b3b3cdd595b3
  listenLocal: true
  maximumStartupDurationSeconds: 3600
  nodeSelector:
    kubernetes.io/os: linux
  podMetadata:
    annotations:
      openshift.io/required-scc: nonroot
      target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
    labels:
      app.kubernetes.io/component: prometheus
      app.kubernetes.io/instance: k8s
      app.kubernetes.io/name: prometheus
      app.kubernetes.io/part-of: openshift-monitoring
      app.kubernetes.io/version: 3.5.0
  podMonitorNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  podMonitorSelector:
    matchExpressions:
    - key: monitoring.openshift.io/collection-profile
      operator: NotIn
      values:
      - minimal
  portName: web
  priorityClassName: system-cluster-critical
  probeNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  probeSelector:
    matchExpressions:
    - key: monitoring.openshift.io/collection-profile
      operator: NotIn
      values:
      - minimal
  replicas: 1
  resources:
    requests:
      cpu: 70m
      memory: 1Gi
  retention: 15d
  ruleNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  ruleSelector: {}
  rules:
    alert: {}
  scrapeClasses:
  - name: tls-client-certificate-auth
    tlsConfig:
      ca: {}
      caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
      cert: {}
      certFile: /etc/prometheus/secrets/metrics-client-certs/tls.crt
      insecureSkipVerify: false
      keyFile: /etc/prometheus/secrets/metrics-client-certs/tls.key
  scrapeInterval: 30s
  secrets:
  - prometheus-k8s-tls
  - prometheus-k8s-thanos-sidecar-tls
  - kube-rbac-proxy
  - prometheus-k8s-kube-rbac-proxy-web
  - metrics-client-certs
  securityContext:
    fsGroup: 65534
    runAsNonRoot: true
    runAsUser: 65534
  serviceAccountName: prometheus-k8s
  serviceMonitorNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  serviceMonitorSelector:
    matchExpressions:
    - key: monitoring.openshift.io/collection-profile
      operator: NotIn
      values:
      - minimal
  thanos:
    blockSize: 2h
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4ef589a87f57c6da3abd479863acd50f8d867d52e652573ad6fa50a4dafc3d13
    resources:
      requests:
        cpu: 1m
        memory: 100Mi
    version: 0.39.2
  version: 3.5.0
  volumes:
  - configMap:
      items:
      - key: ca-bundle.crt
        path: tls-ca-bundle.pem
      name: prometheus-trusted-ca-bundle
    name: prometheus-trusted-ca-bundle
  - name: secret-grpc-tls
    secret:
      secretName: prometheus-k8s-grpc-tls-5l1v479posc61
  web:
    httpConfig:
      headers:
        contentSecurityPolicy: frame-ancestors 'none'
status:
  availableReplicas: 1
  conditions:
  - lastTransitionTime: "2026-02-17T12:51:59Z"
    message: ""
    observedGeneration: 2
    reason: ""
    status: "True"
    type: Available
  - lastTransitionTime: "2026-02-17T12:51:59Z"
    message: ""
    observedGeneration: 2
    reason: ""
    status: "True"
    type: Reconciled
  paused: false
  replicas: 1
  selector: app.kubernetes.io/instance=k8s,app.kubernetes.io/managed-by=prometheus-operator,app.kubernetes.io/name=prometheus,operator.prometheus.io/name=k8s,prometheus=k8s
  shardStatuses:
  - availableReplicas: 1
    replicas: 1
    shardID: "0"
    unavailableReplicas: 0
    updatedReplicas: 1
  shards: 1
  unavailableReplicas: 0
  updatedReplicas: 1
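# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the cluster dump above. Because this
# Prometheus object is reconciled by the cluster-monitoring-operator
# (see app.kubernetes.io/managed-by: cluster-monitoring-operator), edits made
# directly to it are reverted on the next reconcile. In OpenShift, settings
# such as retention, resource requests, and node placement for prometheus-k8s
# are normally tuned through the cluster-monitoring-config ConfigMap instead.
# The values below simply mirror the spec above for illustration; they are
# assumptions, not a recommendation.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cluster-monitoring-config
  namespace: openshift-monitoring
data:
  config.yaml: |
    prometheusK8s:
      retention: 15d             # mirrors spec.retention above
      resources:
        requests:
          cpu: 70m               # mirrors spec.resources.requests above
          memory: 1Gi
      nodeSelector:
        kubernetes.io/os: linux  # mirrors spec.nodeSelector above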