ts=2026-02-05T14:16:51.729887879Z caller=client.go:84 level=info msg="enabling client to server TLS"
ts=2026-02-05T14:16:51.730097354Z caller=options.go:120 level=info msg="TLS client using provided certificate pool"
ts=2026-02-05T14:16:51.73042292Z caller=options.go:153 level=info msg="TLS client authentication enabled"
ts=2026-02-05T14:16:51.731256971Z caller=options.go:26 level=info protocol=gRPC msg="disabled TLS, key and cert must be set to enable"
ts=2026-02-05T14:16:51.731893825Z caller=query.go:833 level=info msg="starting query node"
ts=2026-02-05T14:16:51.732046625Z caller=intrumentation.go:75 level=info msg="changing probe status" status=healthy
ts=2026-02-05T14:16:51.73212234Z caller=http.go:73 level=info service=http/server component=query msg="listening for requests and metrics" address=127.0.0.1:9090
ts=2026-02-05T14:16:51.732428904Z caller=tls_config.go:313 level=info service=http/server component=query msg="Listening on" address=127.0.0.1:9090
ts=2026-02-05T14:16:51.732490959Z caller=tls_config.go:316 level=info service=http/server component=query msg="TLS is disabled." http2=false address=127.0.0.1:9090
ts=2026-02-05T14:16:51.732426955Z caller=intrumentation.go:56 level=info msg="changing probe status" status=ready
ts=2026-02-05T14:16:51.732644369Z caller=grpc.go:167 level=info service=gRPC/server component=query msg="listening for serving gRPC" address=127.0.0.1:10901
ts=2026-02-05T14:16:51.738176746Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.742086046Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.thanos-ruler-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.745285545Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.74796856Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.750498766Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.thanos-ruler-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.752587062Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.754647568Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:16:51.75651668Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:21.73608788Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:21.740103866Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:21.744028109Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:21.748842872Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:21.750518049Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:21.752289822Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:26.746633234Z caller=endpointset.go:436 level=info component=endpointset msg="adding new rule with [storeEndpoints rulesAPI]" address=10.128.0.31:10901 extLset="{thanos_ruler_replica=\"thanos-ruler-user-workload-1\"}"
ts=2026-02-05T14:17:26.746661308Z caller=endpointset.go:436 level=info component=endpointset msg="adding new rule with [storeEndpoints rulesAPI]" address=10.129.0.18:10901 extLset="{thanos_ruler_replica=\"thanos-ruler-user-workload-0\"}"
ts=2026-02-05T14:17:51.736306365Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:51.740278348Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:51.741926276Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:51.745479647Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:51.748398758Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:17:51.751086901Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:18:21.736821935Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:18:21.744188502Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:18:21.75116141Z caller=resolver.go:99 level=error msg="failed to lookup SRV records" host=_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local err="no such host"
ts=2026-02-05T14:18:26.743735632Z caller=endpointset.go:436 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.128.0.34:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-1\"}"
ts=2026-02-05T14:18:26.743817727Z caller=endpointset.go:436 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.129.0.21:10901 extLset="{prometheus=\"openshift-monitoring/k8s\", prometheus_replica=\"prometheus-k8s-0\"}"
ts=2026-02-05T14:18:56.745896364Z caller=endpointset.go:436 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.128.0.37:10901 extLset="{prometheus=\"openshift-user-workload-monitoring/user-workload\", prometheus_replica=\"prometheus-user-workload-1\"}"
ts=2026-02-05T14:18:56.745937025Z caller=endpointset.go:436 level=info component=endpointset msg="adding new sidecar with [storeEndpoints rulesAPI exemplarsAPI targetsAPI MetricMetadataAPI]" address=10.129.0.23:10901 extLset="{prometheus=\"openshift-user-workload-monitoring/user-workload\", prometheus_replica=\"prometheus-user-workload-0\"}"