ts=2026-03-11T14:02:00.108123616Z caller=client.go:84 level=info msg="enabling client to server TLS"
ts=2026-03-11T14:02:00.108297428Z caller=options.go:128 level=info msg="TLS client using provided certificate pool"
ts=2026-03-11T14:02:00.108333941Z caller=options.go:161 level=info msg="TLS client authentication enabled"
ts=2026-03-11T14:02:00.109100964Z caller=options.go:29 level=info protocol=gRPC msg="disabled TLS, key and cert must be set to enable"
ts=2026-03-11T14:02:00.109607979Z caller=query.go:851 level=info msg="starting query node"
ts=2026-03-11T14:02:00.109750939Z caller=intrumentation.go:56 level=info msg="changing probe status" status=ready
ts=2026-03-11T14:02:00.109818985Z caller=intrumentation.go:75 level=info msg="changing probe status" status=healthy
ts=2026-03-11T14:02:00.109856177Z caller=http.go:73 level=info service=http/server component=query msg="listening for requests and metrics" address=127.0.0.1:9090
ts=2026-03-11T14:02:00.110037269Z caller=grpc.go:167 level=info service=gRPC/server component=query msg="listening for serving gRPC" address=127.0.0.1:10901
ts=2026-03-11T14:02:00.110115318Z caller=tls_config.go:348 level=info service=http/server component=query msg="Listening on" address=127.0.0.1:9090
ts=2026-03-11T14:02:00.110143659Z caller=tls_config.go:351 level=info service=http/server component=query msg="TLS is disabled." http2=false address=127.0.0.1:9090
ts=2026-03-11T14:02:00.114331389Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-03-11T14:02:00.116521324Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-03-11T14:02:00.118698663Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-03-11T14:02:30.115073036Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-03-11T14:02:30.117015887Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-03-11T14:02:30.119055881Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-03-11T14:03:05.122619723Z caller=endpointset.go:350 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.130.0.21:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-0\"}"
ts=2026-03-11T14:03:05.122656729Z caller=endpointset.go:350 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.129.0.19:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-1\"}"
ts=2026-03-11T14:03:15.110783133Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = context deadline exceeded" address=10.129.0.19:10901
ts=2026-03-11T14:03:20.111101146Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = context deadline exceeded" address=10.129.0.19:10901
ts=2026-03-11T14:03:25.112920254Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = context deadline exceeded" address=10.129.0.19:10901
ts=2026-03-11T14:03:30.113163549Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.129.0.19:10901: i/o timeout\"" address=10.129.0.19:10901
ts=2026-03-11T14:03:35.114583368Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.129.0.19:10901: i/o timeout\"" address=10.129.0.19:10901
ts=2026-03-11T14:03:35.11518005Z caller=endpointset.go:354 level=info component=endpointset msg="removing endpoint because it's unhealthy or does not exist" address=10.129.0.19:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-1\"}"
ts=2026-03-11T14:04:15.111927011Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.130.0.21:10901: connect: connection refused\"" address=10.130.0.21:10901
ts=2026-03-11T14:04:20.112224221Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.130.0.21:10901: connect: connection refused\"" address=10.130.0.21:10901
ts=2026-03-11T14:04:25.11461034Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.130.0.21:10901: connect: connection refused\"" address=10.130.0.21:10901
ts=2026-03-11T14:04:30.11481042Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.130.0.21:10901: connect: connection refused\"" address=10.130.0.21:10901
ts=2026-03-11T14:04:35.117013844Z caller=endpointset.go:385 level=warn component=endpointset msg="update of endpoint failed" err="getting metadata: rpc error: code = DeadlineExceeded desc = latest balancer error: connection error: desc = \"transport: Error while dialing: dial tcp 10.130.0.21:10901: i/o timeout\"" address=10.130.0.21:10901
ts=2026-03-11T14:04:35.117089633Z caller=endpointset.go:350 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.129.0.21:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-1\"}"
ts=2026-03-11T14:04:35.118009391Z caller=endpointset.go:354 level=info component=endpointset msg="removing endpoint because it's unhealthy or does not exist" address=10.130.0.21:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-0\"}"
ts=2026-03-11T14:05:35.118389638Z caller=endpointset.go:350 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.130.0.25:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-0\"}"
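
The log shows two distinct failure modes: the resolver cannot find SRV records for _grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local ("no such host"), and the endpoint set later fails to reach individual sidecar gRPC addresses ("i/o timeout", "connection refused"), apparently as the backing pods are rescheduled onto new IPs. The following is a minimal Go sketch for reproducing both checks from inside the cluster; it is not Thanos's actual resolver or endpointset code, and the file name srv_dial_check.go and the five-second dial timeout are arbitrary choices for illustration.

    // srv_dial_check.go: hypothetical diagnostic sketch, assuming it runs
    // somewhere that can reach cluster DNS and pod IPs (e.g., a debug pod).
    package main

    import (
        "fmt"
        "net"
        "time"
    )

    func main() {
        // net.LookupSRV("grpc", "tcp", name) queries _grpc._tcp.<name>,
        // the exact host seen in the resolver.go errors above.
        name := "prometheus-operated.openshift-monitoring.svc.cluster.local"
        cname, srvs, err := net.LookupSRV("grpc", "tcp", name)
        if err != nil {
            fmt.Printf("SRV lookup failed (matches \"no such host\"): %v\n", err)
            return
        }
        fmt.Printf("SRV lookup ok: cname=%s records=%d\n", cname, len(srvs))

        // For each discovered target, attempt a plain TCP dial like the gRPC
        // balancer does; "connection refused" and "i/o timeout" here would
        // correspond to the endpointset warnings above.
        for _, srv := range srvs {
            addr := net.JoinHostPort(srv.Target, fmt.Sprint(srv.Port))
            conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
            if err != nil {
                fmt.Printf("dial %s failed: %v\n", addr, err)
                continue
            }
            conn.Close()
            fmt.Printf("dial %s ok\n", addr)
        }
    }

If the SRV lookup succeeds here but the dials fail, the endpoint churn in the log is consistent with sidecar pods restarting or moving; if the lookup itself fails, the problem sits at the DNS layer rather than at the individual endpoints.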