level=info ts=2026-02-16T09:59:02.133160121Z caller=main.go:127 msg="Starting Loki" version="(version=3.6.3, branch=release-3.6.x, revision=9385bc63)"
level=info ts=2026-02-16T09:59:02.133181637Z caller=main.go:128 msg="Loading configuration file" filename=/etc/loki/config/config.yaml
level=debug ts=2026-02-16T09:59:02.133511032Z caller=modules.go:1984 msg="initializing ingester query tags interceptors"
level=info ts=2026-02-16T09:59:02.134335015Z caller=server.go:386 msg="server listening on addresses" http=[::]:3100 grpc=[::]:9095
level=debug ts=2026-02-16T09:59:02.134683341Z caller=netutil.go:85 msg="looking for addresses" inf="[eth0 lo]" inet6enabled=false
level=debug ts=2026-02-16T09:59:02.134767421Z caller=netutil.go:102 msg="detected highest quality address" ipAddr=10.129.0.76 inf=eth0
level=info ts=2026-02-16T09:59:02.135291087Z caller=memberlist_client.go:484 msg="Using memberlist cluster label and node name" cluster_label= node=loki-ingester-0-12180ce6
ts=2026-02-16T09:59:02.135375437Z caller=memberlist_logger.go:74 level=debug msg="configured Transport is not a NodeAwareTransport and some features may not work as desired"
level=debug ts=2026-02-16T09:59:02.135400456Z caller=tcp_transport.go:435 component="memberlist TCPTransport" msg=FinalAdvertiseAddr advertiseAddr=10.129.0.76 advertisePort=7946
level=debug ts=2026-02-16T09:59:02.13541732Z caller=tcp_transport.go:435 component="memberlist TCPTransport" msg=FinalAdvertiseAddr advertiseAddr=10.129.0.76 advertisePort=7946
level=info ts=2026-02-16T09:59:02.135530619Z caller=memberlist_client.go:628 msg="memberlist fast-join starting" nodes_found=1 to_join=4
level=error ts=2026-02-16T09:59:02.138862233Z caller=resolver.go:100 msg="failed to lookup SRV records" host=_memcached-client._tcp.loki-chunks-cache.product-kubearchive-logging.svc.cluster.local err="lookup _memcached-client._tcp.loki-chunks-cache.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host"
level=debug ts=2026-02-16T09:59:02.138889288Z caller=memcached_client_selector.go:109 msg="updating servers" servers= count=0
level=info ts=2026-02-16T09:59:02.140084478Z caller=shipper.go:165 index-store=tsdb-2024-04-01 msg="starting index shipper in WO mode"
level=info ts=2026-02-16T09:59:02.140568716Z caller=table_manager.go:136 index-store=tsdb-2024-04-01 msg="uploading tables"
level=info ts=2026-02-16T09:59:02.140915203Z caller=head_manager.go:313 index-store=tsdb-2024-04-01 component=tsdb-head-manager msg="loaded wals by period" groups=0
level=debug ts=2026-02-16T09:59:02.140939505Z caller=manager.go:263 index-store=tsdb-2024-04-01 component=tsdb-manager msg="building WALs" n=0 ts=2026-02-16T09:59:02.140937323Z
level=debug ts=2026-02-16T09:59:02.140958279Z caller=manager.go:287 index-store=tsdb-2024-04-01 component=tsdb-manager msg="recovering tenant heads"
level=info ts=2026-02-16T09:59:02.140990797Z caller=manager.go:86 index-store=tsdb-2024-04-01 component=tsdb-manager msg="loaded leftover local indices" err=null successful=true buckets=0 indices=0 failures=0
level=info ts=2026-02-16T09:59:02.141012289Z caller=head_manager.go:313 index-store=tsdb-2024-04-01 component=tsdb-head-manager msg="loaded wals by period" groups=0
level=debug ts=2026-02-16T09:59:02.141021029Z caller=manager.go:263 index-store=tsdb-2024-04-01 component=tsdb-manager msg="building WALs" n=0 ts=2026-02-16T09:59:02.141019543Z
level=debug ts=2026-02-16T09:59:02.141028401Z caller=manager.go:287 index-store=tsdb-2024-04-01 component=tsdb-manager msg="recovering tenant heads"
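The only error so far is the chunks-cache SRV lookup at startup; it appears transient, since the same client later picks up a server (see the "updating servers" entry with count=1 at 10:00:02 below). For reference, a minimal Go sketch that reproduces the lookup Loki's resolver performs, using the service and namespace names taken from the error line above:

// srvcheck.go: reproduce the SRV query behind the resolver.go error above.
// Names are copied from the log; adjust for another cluster.
package main

import (
	"fmt"
	"net"
)

func main() {
	// net.LookupSRV queries _memcached-client._tcp.loki-chunks-cache.product-kubearchive-logging.svc.cluster.local
	cname, addrs, err := net.LookupSRV("memcached-client", "tcp",
		"loki-chunks-cache.product-kubearchive-logging.svc.cluster.local")
	if err != nil {
		fmt.Println("SRV lookup failed:", err) // "no such host" matches the startup error
		return
	}
	fmt.Println("canonical name:", cname)
	for _, a := range addrs {
		fmt.Printf("target=%s port=%d\n", a.Target, a.Port)
	}
}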
level=debug ts=2026-02-16T09:59:02.142799022Z caller=netutil.go:85 component=ingester msg="looking for addresses" inf="[eth0 lo]" inet6enabled=false
ts=2026-02-16T09:59:02.142853776Z caller=memberlist_logger.go:74 level=warn msg="Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host"
level=info ts=2026-02-16T09:59:02.142870089Z caller=memberlist_client.go:634 msg="fast-joining node failed" node=loki-memberlist.product-kubearchive-logging.svc.cluster.local err="1 error occurred:\n\t* Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host\n\n"
level=warn ts=2026-02-16T09:59:02.142883522Z caller=memberlist_client.go:644 msg="memberlist fast-join failed because no node has been successfully reached" elapsed_time=7.354456ms
level=error ts=2026-02-16T09:59:02.142893197Z caller=memberlist_client.go:521 msg="failed to fast-join the memberlist cluster at startup" err="no memberlist node reached during fast-join procedure"
level=debug ts=2026-02-16T09:59:02.142894445Z caller=netutil.go:102 component=ingester msg="detected highest quality address" ipAddr=10.129.0.76 inf=eth0
level=info ts=2026-02-16T09:59:02.142907205Z caller=memberlist_client.go:666 phase=startup msg="joining memberlist cluster" join_members=loki-memberlist.product-kubearchive-logging.svc.cluster.local
ts=2026-02-16T09:59:02.14564761Z caller=memberlist_logger.go:74 level=warn msg="Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host"
level=warn ts=2026-02-16T09:59:02.145660718Z caller=memberlist_client.go:700 phase=startup msg="joining memberlist cluster" attempts=1 max_attempts=10 err="1 error occurred:\n\t* Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host\n\n"
level=debug ts=2026-02-16T09:59:02.158851349Z caller=module_service.go:72 msg="module waiting for initialization" module=memberlist-kv waiting_for=server
level=debug ts=2026-02-16T09:59:02.158865234Z caller=module_service.go:72 msg="module waiting for initialization" module=analytics waiting_for=memberlist-kv
level=info ts=2026-02-16T09:59:02.158863725Z caller=module_service.go:82 msg=starting module=runtime-config
level=debug ts=2026-02-16T09:59:02.158876401Z caller=module_service.go:72 msg="module waiting for initialization" module=ingester waiting_for=analytics
level=info ts=2026-02-16T09:59:02.158877381Z caller=module_service.go:82 msg=starting module=server
level=debug ts=2026-02-16T09:59:02.1588878Z caller=module_service.go:72 msg="module waiting for initialization" module=ring waiting_for=server
level=debug ts=2026-02-16T09:59:02.158896822Z caller=module_service.go:72 msg="module waiting for initialization" module=store waiting_for=memberlist-kv
level=info ts=2026-02-16T09:59:02.158905127Z caller=module_service.go:82 msg=starting module=memberlist-kv
level=debug ts=2026-02-16T09:59:02.158917529Z caller=module_service.go:72 msg="module waiting for initialization" module=store waiting_for=runtime-config
level=debug ts=2026-02-16T09:59:02.158924026Z caller=module_service.go:72 msg="module waiting for initialization" module=analytics waiting_for=ring
level=debug ts=2026-02-16T09:59:02.158954305Z caller=module_service.go:72 msg="module waiting for initialization" module=ring waiting_for=memberlist-kv
level=debug ts=2026-02-16T09:59:02.159086754Z caller=module_service.go:72 msg="module waiting for initialization" module=ring waiting_for=runtime-config
level=debug ts=2026-02-16T09:59:02.159309717Z caller=module_service.go:72 msg="module waiting for initialization" module=store waiting_for=server
level=info ts=2026-02-16T09:59:02.159316189Z caller=module_service.go:82 msg=starting module=store
level=info ts=2026-02-16T09:59:02.159331834Z caller=module_service.go:82 msg=starting module=ring
level=info ts=2026-02-16T09:59:02.159418719Z caller=ring.go:365 msg="ring doesn't exist in KV store yet"
level=debug ts=2026-02-16T09:59:02.159447376Z caller=module_service.go:72 msg="module waiting for initialization" module=analytics waiting_for=runtime-config
level=debug ts=2026-02-16T09:59:02.159454485Z caller=module_service.go:72 msg="module waiting for initialization" module=analytics waiting_for=server
level=info ts=2026-02-16T09:59:02.159458018Z caller=module_service.go:82 msg=starting module=analytics
level=debug ts=2026-02-16T09:59:02.159565272Z caller=module_service.go:72 msg="module waiting for initialization" module=ingester waiting_for=memberlist-kv
level=debug ts=2026-02-16T09:59:02.159572061Z caller=module_service.go:72 msg="module waiting for initialization" module=ingester waiting_for=ring
level=debug ts=2026-02-16T09:59:02.159576576Z caller=module_service.go:72 msg="module waiting for initialization" module=ingester waiting_for=runtime-config
level=debug ts=2026-02-16T09:59:02.159579813Z caller=module_service.go:72 msg="module waiting for initialization" module=ingester waiting_for=server
level=debug ts=2026-02-16T09:59:02.15958501Z caller=module_service.go:72 msg="module waiting for initialization" module=ingester waiting_for=store
level=info ts=2026-02-16T09:59:02.159591341Z caller=module_service.go:82 msg=starting module=ingester
level=info ts=2026-02-16T09:59:02.159621987Z caller=ingester.go:565 component=ingester msg="recovering from checkpoint"
level=info ts=2026-02-16T09:59:02.15964655Z caller=recovery.go:42 component=ingester msg="no checkpoint found, treating as no-op"
level=info ts=2026-02-16T09:59:02.159738946Z caller=ingester.go:581 component=ingester msg="recovered WAL checkpoint recovery finished" elapsed=127.961µs errors=false
level=info ts=2026-02-16T09:59:02.159747952Z caller=ingester.go:587 component=ingester msg="recovering from WAL"
level=info ts=2026-02-16T09:59:02.159870666Z caller=ingester.go:603 component=ingester msg="WAL segment recovery finished" elapsed=259.832µs errors=false
level=info ts=2026-02-16T09:59:02.159877308Z caller=ingester.go:551 component=ingester msg="closing recoverer"
level=info ts=2026-02-16T09:59:02.159886345Z caller=ingester.go:559 component=ingester msg="WAL recovery finished" time=275.347µs
level=info ts=2026-02-16T09:59:02.159904638Z caller=wal.go:158 msg=started component=wal
level=info ts=2026-02-16T09:59:02.159960969Z caller=lifecycler.go:687 component=ingester msg="not loading tokens from file, tokens file path is empty"
level=info ts=2026-02-16T09:59:02.159996421Z caller=ingester.go:772 component=ingester msg="sleeping for initial delay before starting periodic flushing" delay=5.153078521s
level=info ts=2026-02-16T09:59:02.15998332Z caller=lifecycler.go:714 component=ingester msg="instance not found in ring, adding with no tokens" ring=ingester
level=info ts=2026-02-16T09:59:02.160091295Z caller=ingester.go:467 component=ingester msg="autoforget is enabled and will remove unhealthy instances from the ring after 1m0s with no heartbeat"
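The fast-join and the first join attempt fail on the same DNS error: loki-memberlist.product-kubearchive-logging.svc.cluster.local does not resolve. A minimal sketch, to be run from a pod in the same cluster, that checks whether the name publishes any addresses (host name taken from the errors above):

// hostcheck.go: resolve the memberlist join address from inside the cluster.
// With a headless Service, this should return one A record per ready pod.
package main

import (
	"fmt"
	"net"
)

func main() {
	host := "loki-memberlist.product-kubearchive-logging.svc.cluster.local"
	ips, err := net.LookupHost(host)
	if err != nil {
		// "no such host" here matches the join failures in the log: either
		// the Service doesn't exist or it has no ready endpoints yet.
		fmt.Println("lookup failed:", err)
		return
	}
	for _, ip := range ips {
		fmt.Println("member address:", ip)
	}
}

A likely explanation for the startup window seen here is the usual headless-Service chicken-and-egg: DNS returns no records until some pod is ready, so memberlist keeps retrying (max_attempts=10 above) until a peer appears.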
level=info ts=2026-02-16T09:59:02.160114498Z caller=loki.go:599 msg="Loki started" startup_time=89.957187ms
level=debug ts=2026-02-16T09:59:02.160132939Z caller=lifecycler.go:552 component=ingester msg="JoinAfter expired" ring=ingester
level=info ts=2026-02-16T09:59:02.160141829Z caller=lifecycler.go:556 component=ingester msg="auto-joining cluster after timeout" ring=ingester
level=debug ts=2026-02-16T09:59:02.160226306Z caller=memberlist_client.go:1172 msg="CAS attempt failed" err="no change detected" retry=true
ts=2026-02-16T09:59:03.745646835Z caller=memberlist_logger.go:74 level=warn msg="Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host"
level=warn ts=2026-02-16T09:59:03.745669994Z caller=memberlist_client.go:700 phase=startup msg="joining memberlist cluster" attempts=2 max_attempts=10 err="1 error occurred:\n\t* Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host\n\n"
ts=2026-02-16T09:59:05.849095342Z caller=memberlist_logger.go:74 level=warn msg="Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host"
level=warn ts=2026-02-16T09:59:05.849123946Z caller=memberlist_client.go:700 phase=startup msg="joining memberlist cluster" attempts=3 max_attempts=10 err="1 error occurred:\n\t* Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host\n\n"
level=warn ts=2026-02-16T09:59:07.161143316Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
ts=2026-02-16T09:59:12.154853982Z caller=memberlist_logger.go:74 level=warn msg="Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host"
level=warn ts=2026-02-16T09:59:12.154878342Z caller=memberlist_client.go:700 phase=startup msg="joining memberlist cluster" attempts=4 max_attempts=10 err="1 error occurred:\n\t* Failed to resolve loki-memberlist.product-kubearchive-logging.svc.cluster.local: lookup loki-memberlist.product-kubearchive-logging.svc.cluster.local on 172.30.0.10:53: no such host\n\n"
level=warn ts=2026-02-16T09:59:12.161037715Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T09:59:17.160881098Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=debug ts=2026-02-16T09:59:18.375302552Z caller=logging.go:137 msg="GET /ready (503) 129.024µs"
ts=2026-02-16T09:59:21.78157629Z caller=memberlist_logger.go:74 level=debug msg="Failed to join 10.129.0.77:7946: dial tcp 10.129.0.77:7946: connect: connection refused"
level=warn ts=2026-02-16T09:59:21.781601984Z caller=memberlist_client.go:700 phase=startup msg="joining memberlist cluster" attempts=5 max_attempts=10 err="1 error occurred:\n\t* Failed to join 10.129.0.77:7946: dial tcp 10.129.0.77:7946: connect: connection refused\n\n"
level=warn ts=2026-02-16T09:59:22.160955325Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T09:59:27.160679415Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=debug ts=2026-02-16T09:59:28.374143747Z caller=logging.go:137 msg="GET /ready (503) 82.988µs"
level=warn ts=2026-02-16T09:59:32.160796617Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=info ts=2026-02-16T09:59:32.160785822Z caller=recalculate_owned_streams.go:49 msg="starting recalculate owned streams job"
level=info ts=2026-02-16T09:59:32.160828416Z caller=recalculate_owned_streams.go:63 msg="detected ring changes, re-evaluating streams ownership"
level=info ts=2026-02-16T09:59:32.160834904Z caller=recalculate_owned_streams.go:52 msg="completed recalculate owned streams job"
level=debug ts=2026-02-16T09:59:34.823930877Z caller=flush.go:215 msg="computed flush rate" rate=3.3333333333333335
level=warn ts=2026-02-16T09:59:37.160726835Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T09:59:42.160652072Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T09:59:47.16038672Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
ts=2026-02-16T09:59:50.582656241Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: 10.129.0.76:7946"
ts=2026-02-16T09:59:50.582759519Z caller=memberlist_logger.go:74 level=debug msg="Stream connection from=10.129.0.76:41074"
ts=2026-02-16T09:59:50.584011458Z caller=memberlist_logger.go:74 level=debug msg="Failed to join 10.129.0.75:7946: dial tcp 10.129.0.75:7946: connect: connection refused"
ts=2026-02-16T09:59:50.584669082Z caller=memberlist_logger.go:74 level=debug msg="Failed to join 10.129.0.77:7946: dial tcp 10.129.0.77:7946: connect: connection refused"
ts=2026-02-16T09:59:50.586474533Z caller=memberlist_logger.go:74 level=debug msg="Failed to join 10.130.0.76:7946: dial tcp 10.130.0.76:7946: connect: connection refused"
ts=2026-02-16T09:59:50.587909644Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: 10.130.0.74:7946"
ts=2026-02-16T09:59:50.590892928Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: 10.130.0.75:7946"
ts=2026-02-16T09:59:50.593560103Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: 10.130.0.77:7946"
level=info ts=2026-02-16T09:59:50.594895369Z caller=memberlist_client.go:673 phase=startup msg="joining memberlist cluster succeeded" reached_nodes=4 elapsed_time=48.451983339s
ts=2026-02-16T09:59:51.780877767Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: loki-compactor-0-b8901257 10.130.0.77:7946"
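From attempt 5 onward the error changes from DNS failure to "connection refused" on the gossip port, meaning the name now resolves but the peer's memberlist listener is not up yet; the join finally succeeds after roughly 48s with reached_nodes=4. A small sketch to tell the two failure modes apart (the peer address is the one from the join errors above; swap in any suspect pod IP):

// portcheck.go: distinguish "no such host" (DNS) from "connection refused"
// (listener not up) on the memberlist gossip port 7946.
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	addr := "10.129.0.77:7946" // peer that refused connections above
	conn, err := net.DialTimeout("tcp", addr, 2*time.Second)
	if err != nil {
		// "connection refused" means routing and DNS are fine but the
		// peer's gossip listener isn't accepting yet (pod still starting).
		fmt.Println("dial failed:", err)
		return
	}
	conn.Close()
	fmt.Println("gossip port reachable:", addr)
}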
level=warn ts=2026-02-16T09:59:52.16084324Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T09:59:57.160784982Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=debug ts=2026-02-16T10:00:00.319638313Z caller=flush.go:215 msg="computed flush rate" rate=3.3333333333333335
level=debug ts=2026-02-16T10:00:02.140394975Z caller=memcached_client_selector.go:109 msg="updating servers" servers=10.128.0.72:11211 count=1
level=info ts=2026-02-16T10:00:02.141451646Z caller=table_manager.go:136 index-store=tsdb-2024-04-01 msg="uploading tables"
level=debug ts=2026-02-16T10:00:02.141634729Z caller=manager.go:249 index-store=tsdb-2024-04-01 component=tsdb-manager msg="building heads"
level=debug ts=2026-02-16T10:00:02.141678602Z caller=head_manager.go:440 index-store=tsdb-2024-04-01 component=tsdb-head-manager msg="listed WALs" pd=1968039 n=1
level=debug ts=2026-02-16T10:00:02.14171934Z caller=head_manager.go:445 index-store=tsdb-2024-04-01 component=tsdb-head-manager msg="removing wals" pd=1968039 n=1
level=info ts=2026-02-16T10:00:02.160806089Z caller=recalculate_owned_streams.go:49 msg="starting recalculate owned streams job"
level=debug ts=2026-02-16T10:00:02.160827107Z caller=recalculate_owned_streams.go:60 msg="ring is not changed, skipping the job"
level=info ts=2026-02-16T10:00:02.160835865Z caller=recalculate_owned_streams.go:52 msg="completed recalculate owned streams job"
level=warn ts=2026-02-16T10:00:02.160839062Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=debug ts=2026-02-16T10:00:03.142124805Z caller=reporter.go:244 msg="failed to read cluster seed file" err="failed to get s3 object: operation error S3: GetObject, exceeded maximum number of attempts, 3, https response error StatusCode: 0, RequestID: , HostID: , request send failed, Get \"http://minio:9000/loki-data/loki_cluster_seed.json?x-id=GetObject\": 3 errors occurred:\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\n"
level=warn ts=2026-02-16T10:00:07.16075423Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T10:00:12.160845312Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T10:00:17.160322545Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
ts=2026-02-16T10:00:18.410691866Z caller=memberlist_logger.go:74 level=debug msg="Stream connection from=10.130.0.77:58982"
level=debug ts=2026-02-16T10:00:20.390634753Z caller=reporter.go:244 msg="failed to read cluster seed file" err="failed to get s3 object: operation error S3: GetObject, exceeded maximum number of attempts, 3, https response error StatusCode: 0, RequestID: , HostID: , request send failed, Get \"http://minio:9000/loki-data/loki_cluster_seed.json?x-id=GetObject\": 3 errors occurred:\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\n"
ts=2026-02-16T10:00:21.783660595Z caller=memberlist_logger.go:74 level=debug msg="Initiating push/pull sync with: loki-compactor-0-b8901257 10.130.0.77:7946"
level=warn ts=2026-02-16T10:00:22.161090775Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
ts=2026-02-16T10:00:23.711953742Z caller=memberlist_logger.go:74 level=debug msg="Stream connection from=10.130.0.74:37614"
level=debug ts=2026-02-16T10:00:26.681695065Z caller=flush.go:215 msg="computed flush rate" rate=3.3333333333333335
level=info ts=2026-02-16T10:00:26.681759136Z caller=flush.go:305 component=ingester msg="flushing stream" user=kubearchive fp=ec4fff4ad1321f8e immediate=false num_chunks=1 total_comp="2.7 MB" avg_comp="2.7 MB" total_uncomp="23 MB" avg_uncomp="23 MB" full=1 labels="{stream=\"openshift-gitops\"}"
level=error ts=2026-02-16T10:00:26.690514577Z caller=flush.go:262 component=ingester loop=14 org_id=kubearchive msg="failed to flush" retries=0 err="failed to flush chunks: store put chunk: operation error S3: PutObject, https response error StatusCode: 0, RequestID: , HostID: , request send failed, Put \"http://minio:9000/loki-data/kubearchive/ec4fff4ad1321f8e/19c65e39cf5%3A19c65e4a54e%3A65d27ca0?x-id=PutObject\": dial tcp: lookup minio on 172.30.0.10:53: no such host, num_chunks: 1, labels: {stream=\"openshift-gitops\"}"
level=warn ts=2026-02-16T10:00:27.161113684Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
ts=2026-02-16T10:00:29.528004174Z caller=memberlist_logger.go:74 level=debug msg="Stream connection from=10.130.0.75:43786"
level=info ts=2026-02-16T10:00:32.16011593Z caller=recalculate_owned_streams.go:49 msg="starting recalculate owned streams job"
level=debug ts=2026-02-16T10:00:32.160149548Z caller=recalculate_owned_streams.go:60 msg="ring is not changed, skipping the job"
level=info ts=2026-02-16T10:00:32.160158376Z caller=recalculate_owned_streams.go:52 msg="completed recalculate owned streams job"
level=warn ts=2026-02-16T10:00:32.160239813Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T10:00:37.160416139Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=debug ts=2026-02-16T10:00:38.340478242Z caller=reporter.go:244 msg="failed to read cluster seed file" err="failed to get s3 object: operation error S3: GetObject, exceeded maximum number of attempts, 3, https response error StatusCode: 0, RequestID: , HostID: , request send failed, Get \"http://minio:9000/loki-data/loki_cluster_seed.json?x-id=GetObject\": 3 errors occurred:\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\t* dial tcp: lookup minio on 172.30.0.10:53: no such host\n\n"
level=info ts=2026-02-16T10:00:38.564167805Z caller=flush.go:305 component=ingester msg="flushing stream" user=kubearchive fp=ec4fff4ad1321f8e immediate=false num_chunks=1 total_comp="2.7 MB" avg_comp="2.7 MB" total_uncomp="23 MB" avg_uncomp="23 MB" full=1 labels="{stream=\"openshift-gitops\"}"
level=error ts=2026-02-16T10:00:38.571830661Z caller=flush.go:262 component=ingester loop=14 org_id=kubearchive msg="failed to flush" retries=1 err="failed to flush chunks: store put chunk: operation error S3: PutObject, https response error StatusCode: 0, RequestID: , HostID: , request send failed, Put \"http://minio:9000/loki-data/kubearchive/ec4fff4ad1321f8e/19c65e39cf5%3A19c65e4a54e%3A65d27ca0?x-id=PutObject\": dial tcp: lookup minio on 172.30.0.10:53: no such host, num_chunks: 1, labels: {stream=\"openshift-gitops\"}"
level=warn ts=2026-02-16T10:00:42.160153698Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
level=warn ts=2026-02-16T10:00:47.160159729Z caller=ingester.go:497 component=ingester msg="autoforget have seen 0 unhealthy ingesters out of 1, network may be partioned, skip forgeting ingesters this round"
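The remaining errors, the cluster-seed-file reads and the chunk flushes at retries=0 and retries=1, all reduce to one cause: "lookup minio ... no such host". The configured object-store endpoint is the bare hostname minio (visible in the error URLs), which only resolves from inside the namespace that owns the minio Service; the failed flushes are retried, so the chunks are still held by the ingester rather than lost. A minimal resolution check follows; the fully qualified name in it is an assumption for illustration, not taken from the log:

// s3check.go: check whether the object-store endpoint from the flush errors
// resolves, both as configured (bare "minio") and under an assumed FQDN.
package main

import (
	"fmt"
	"net"
)

func main() {
	candidates := []string{
		"minio", // endpoint exactly as it appears in the error URLs above
		"minio.product-kubearchive-logging.svc.cluster.local", // hypothetical FQDN; namespace is a guess
	}
	for _, host := range candidates {
		ips, err := net.LookupHost(host)
		if err != nil {
			fmt.Printf("%s: lookup failed: %v\n", host, err)
			continue
		}
		fmt.Printf("%s -> %v\n", host, ips)
	}
}

If the bare name fails but a fully qualified form resolves, the usual fix is to point the storage endpoint at the namespace-qualified Service name rather than the short name.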