---
# Mirror pod for the kube-apiserver-proxy static pod
# (kubernetes.io/config.source: file) as reported by the kubelet on node
# ip-10-0-131-203.ec2.internal.
# NOTE(review): this is read-only API-server output — it includes
# server-populated fields (managedFields, status, resourceVersion, uid) and
# is not intended to be re-applied as a manifest.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    kubernetes.io/config.hash: cad5fa07f782ca2be3a004d69f182f5f
    kubernetes.io/config.mirror: cad5fa07f782ca2be3a004d69f182f5f
    kubernetes.io/config.seen: "2026-04-16T18:16:38.957924714Z"
    kubernetes.io/config.source: file
  creationTimestamp: "2026-04-16T18:16:39Z"
  generation: 1
  labels:
    k8s-app: kube-apiserver-proxy
  managedFields:
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:annotations:
          .: {}
          f:kubernetes.io/config.hash: {}
          f:kubernetes.io/config.mirror: {}
          f:kubernetes.io/config.seen: {}
          f:kubernetes.io/config.source: {}
        f:labels:
          .: {}
          f:k8s-app: {}
        f:ownerReferences:
          .: {}
          k:{"uid":"4b99bf87-05e8-4e5f-aa94-d4b155ce49a3"}: {}
      f:spec:
        f:containers:
          k:{"name":"haproxy"}:
            .: {}
            f:command: {}
            f:image: {}
            f:imagePullPolicy: {}
            f:livenessProbe:
              .: {}
              f:failureThreshold: {}
              f:httpGet:
                .: {}
                f:host: {}
                f:path: {}
                f:port: {}
                f:scheme: {}
              f:initialDelaySeconds: {}
              f:periodSeconds: {}
              f:successThreshold: {}
              f:timeoutSeconds: {}
            f:name: {}
            f:ports:
              .: {}
              k:{"containerPort":6443,"protocol":"TCP"}:
                .: {}
                f:containerPort: {}
                f:hostPort: {}
                f:name: {}
                f:protocol: {}
            f:resources:
              .: {}
              f:requests:
                .: {}
                f:cpu: {}
                f:memory: {}
            f:securityContext:
              .: {}
              f:runAsUser: {}
            f:terminationMessagePath: {}
            f:terminationMessagePolicy: {}
            f:volumeMounts:
              .: {}
              k:{"mountPath":"/usr/local/etc/haproxy"}:
                .: {}
                f:mountPath: {}
                f:name: {}
        f:dnsPolicy: {}
        f:enableServiceLinks: {}
        f:hostNetwork: {}
        f:nodeName: {}
        f:priorityClassName: {}
        f:restartPolicy: {}
        f:schedulerName: {}
        f:securityContext: {}
        f:terminationGracePeriodSeconds: {}
        f:tolerations: {}
        f:volumes:
          .: {}
          k:{"name":"config"}:
            .: {}
            f:hostPath:
              .: {}
              f:path: {}
              f:type: {}
            f:name: {}
    manager: kubelet
    operation: Update
    time: "2026-04-16T18:16:39Z"
  - apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:status:
        f:conditions:
          .: {}
          k:{"type":"ContainersReady"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Initialized"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"PodReadyToStartContainers"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"PodScheduled"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
          k:{"type":"Ready"}:
            .: {}
            f:lastProbeTime: {}
            f:lastTransitionTime: {}
            f:status: {}
            f:type: {}
        f:containerStatuses: {}
        f:hostIP: {}
        f:hostIPs: {}
        f:phase: {}
        f:podIP: {}
        f:podIPs:
          .: {}
          k:{"ip":"10.0.131.203"}:
            .: {}
            f:ip: {}
        f:startTime: {}
    manager: kubelet
    operation: Update
    subresource: status
    time: "2026-04-16T18:16:58Z"
  name: kube-apiserver-proxy-ip-10-0-131-203.ec2.internal
  namespace: kube-system
  ownerReferences:
  - apiVersion: v1
    controller: true
    kind: Node
    name: ip-10-0-131-203.ec2.internal
    uid: 4b99bf87-05e8-4e5f-aa94-d4b155ce49a3
  resourceVersion: "6532"
  uid: 6e40bb7c-c437-4ec0-ab53-bc1a5bfd4049
spec:
  containers:
  - command:
    - haproxy
    - -f
    - /usr/local/etc/haproxy
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f2e4763905898d3870f64ebc9721d8d43ae2973f4ba295d48f84e36e6f72d013
    imagePullPolicy: IfNotPresent
    # Probe targets the in-cluster apiserver service IP directly (host network).
    livenessProbe:
      failureThreshold: 3
      httpGet:
        host: 172.20.0.1
        path: /version
        port: 6443
        scheme: HTTPS
      initialDelaySeconds: 120
      periodSeconds: 120
      successThreshold: 1
      timeoutSeconds: 1
    name: haproxy
    ports:
    - containerPort: 6443
      hostPort: 6443
      name: apiserver
      protocol: TCP
    resources:
      requests:
        cpu: 13m
        memory: 16Mi
    securityContext:
      runAsUser: 1001
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /usr/local/etc/haproxy
      name: config
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostNetwork: true
  nodeName: ip-10-0-131-203.ec2.internal
  preemptionPolicy: PreemptLowerPriority
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  volumes:
  - hostPath:
      path: /etc/kubernetes/apiserver-proxy-config
      type: ""
    name: config
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T18:16:58Z"
    status: "True"
    type: PodReadyToStartContainers
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T18:16:39Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T18:16:58Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T18:16:58Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T18:16:39Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - allocatedResources:
      cpu: 13m
      memory: 16Mi
    containerID: cri-o://83b63964826e5a2cb009b24a13fd5dd78032023e307a964a94e3c0eda324db02
    image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f2e4763905898d3870f64ebc9721d8d43ae2973f4ba295d48f84e36e6f72d013
    imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7534cffb860df8351d4e7c4872551f4238b71f161adb876a5852061455e5eb7e
    lastState: {}
    name: haproxy
    ready: true
    resources:
      requests:
        cpu: 13m
        memory: 16Mi
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-04-16T18:16:57Z"
    user:
      linux:
        gid: 0
        supplementalGroups:
        - 0
        uid: 1001
    volumeMounts:
    - mountPath: /usr/local/etc/haproxy
      name: config
  hostIP: 10.0.131.203
  hostIPs:
  - ip: 10.0.131.203
  phase: Running
  podIP: 10.0.131.203
  podIPs:
  - ip: 10.0.131.203
  qosClass: Burstable
  startTime: "2026-04-16T18:16:39Z"