W0216 17:48:51.837616 1 cmd.go:245] Using insecure, self-signed certificates
I0216 17:48:52.079381 1 start.go:223] Unable to read service ca bundle: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 17:48:52.079556 1 observer_polling.go:159] Starting file observer
I0216 17:48:52.340144 1 operator.go:59] Starting insights-operator v0.0.0-master+$Format:%H$
I0216 17:48:52.340305 1 legacy_config.go:327] Current config: {"report":false,"storagePath":"/var/lib/insights-operator","interval":"2h","endpoint":"https://console.redhat.com/api/ingress/v1/upload","conditionalGathererEndpoint":"https://console.redhat.com/api/gathering/v2/%s/gathering_rules","pull_report":{"endpoint":"https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports","delay":"60s","timeout":"3000s","min_retry":"30s"},"impersonate":"system:serviceaccount:openshift-insights:gather","enableGlobalObfuscation":false,"ocm":{"scaEndpoint":"https://api.openshift.com/api/accounts_mgmt/v1/certificates","scaInterval":"8h","scaDisabled":false,"clusterTransferEndpoint":"https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/","clusterTransferInterval":"12h"},"disableInsightsAlerts":false,"processingStatusEndpoint":"https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/status","reportEndpointTechPreview":"https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/report"}
I0216 17:48:52.340626 1 secure_serving.go:57] Forcing use of http/1.1 only
W0216 17:48:52.340644 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.
W0216 17:48:52.340647 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.
W0216 17:48:52.340650 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.
W0216 17:48:52.340653 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.
W0216 17:48:52.340655 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.
W0216 17:48:52.340657 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.
I0216 17:48:52.340715 1 simple_featuregate_reader.go:171] Starting feature-gate-detector
I0216 17:48:52.347372 1 event.go:364] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-insights", Name:"insights-operator", UID:"ef6ed5f7-b89b-4287-af93-fdf79ce27b9c", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AWSEFSDriverVolumeMetrics", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BareMetalLoadBalancer", "BuildCSIVolumes", "ChunkSizeMiB", "CloudDualStackNodeIPs", "ClusterAPIInstallAWS", "ClusterAPIInstallAzure", "ClusterAPIInstallGCP", "ClusterAPIInstallNutanix", "ClusterAPIInstallOpenStack", "ClusterAPIInstallPowerVS", "ClusterAPIInstallVSphere", "DisableKubeletCloudCredentialProviders", "ExternalCloudProvider", "ExternalCloudProviderAzure", "ExternalCloudProviderExternal", "ExternalCloudProviderGCP", "ExternalOIDC", "GCPLabelsTags", "HardwareSpeed", "IngressControllerLBSubnetsAWS", "KMSv1", "ManagedBootImages", "MetricsServer", "MultiArchInstallAWS", "MultiArchInstallGCP", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NodeDisruptionPolicy", "PrivateHostedZoneAWS", "SetEIPForNLBIngressController", "StreamingCollectionEncodingToJSON", "StreamingCollectionEncodingToProtobuf", "VSphereControlPlaneMachineSet", "VSphereDriverConfiguration", "VSphereStaticIPs", "ValidatingAdmissionPolicy"}, Disabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AutomatedEtcdBackup", "BootcNodeManagement", "CSIDriverSharedResource", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "DNSNameResolver", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "GCPClusterHostedDNS", "GatewayAPI", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "InstallAlternateInfrastructureAWS", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "MachineAPIProviderOpenStack", "MachineConfigNodes", "ManagedBootImagesAWS", "MaxUnavailableStatefulSet", "MetricsCollectionProfiles", "MixedCPUsAllocation", "MultiArchInstallAzure", "NetworkSegmentation", "NewOLM", "NodeSwap", "OVNObservability", "OnClusterBuild", "OpenShiftPodSecurityAdmission", "PersistentIPsForVirtualization", "PinnedImages", "PlatformOperators", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SignatureStores", "SigstoreImageVerification", "TranslateStreamCloseWebsocketRequests", "UpgradeStatus", "UserNamespacesSupport", "VSphereMultiVCenters", "VolumeGroupSnapshot"}}
I0216 17:48:52.347399 1 operator.go:124] FeatureGates initialized: knownFeatureGates=[AWSEFSDriverVolumeMetrics AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AutomatedEtcdBackup AzureWorkloadIdentity BareMetalLoadBalancer BootcNodeManagement BuildCSIVolumes CSIDriverSharedResource ChunkSizeMiB CloudDualStackNodeIPs ClusterAPIInstall ClusterAPIInstallAWS ClusterAPIInstallAzure ClusterAPIInstallGCP ClusterAPIInstallIBMCloud ClusterAPIInstallNutanix ClusterAPIInstallOpenStack ClusterAPIInstallPowerVS ClusterAPIInstallVSphere ClusterMonitoringConfig DNSNameResolver DisableKubeletCloudCredentialProviders DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example ExternalCloudProvider ExternalCloudProviderAzure ExternalCloudProviderExternal ExternalCloudProviderGCP ExternalOIDC GCPClusterHostedDNS GCPLabelsTags GatewayAPI HardwareSpeed IngressControllerLBSubnetsAWS InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather InstallAlternateInfrastructureAWS KMSv1 MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController MachineAPIProviderOpenStack MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MaxUnavailableStatefulSet MetricsCollectionProfiles MetricsServer MixedCPUsAllocation MultiArchInstallAWS MultiArchInstallAzure MultiArchInstallGCP NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM NodeDisruptionPolicy NodeSwap OVNObservability OnClusterBuild OpenShiftPodSecurityAdmission PersistentIPsForVirtualization PinnedImages PlatformOperators PrivateHostedZoneAWS ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SignatureStores SigstoreImageVerification StreamingCollectionEncodingToJSON StreamingCollectionEncodingToProtobuf TranslateStreamCloseWebsocketRequests UpgradeStatus UserNamespacesSupport VSphereControlPlaneMachineSet VSphereDriverConfiguration VSphereMultiVCenters VSphereStaticIPs ValidatingAdmissionPolicy VolumeGroupSnapshot]
I0216 17:48:52.349182 1 requestheader_controller.go:169] Starting RequestHeaderAuthRequestController
I0216 17:48:52.349198 1 shared_informer.go:311] Waiting for caches to sync for RequestHeaderAuthRequestController
I0216 17:48:52.349233 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file"
I0216 17:48:52.349249 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file
I0216 17:48:52.349255 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file"
I0216 17:48:52.349268 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0216 17:48:52.349468 1 dynamic_serving_content.go:132] "Starting controller" name="serving-cert::/tmp/serving-cert-4227557015/tls.crt::/tmp/serving-cert-4227557015/tls.key"
I0216 17:48:52.349560 1 secure_serving.go:213] Serving securely on [::]:8443
I0216 17:48:52.349583 1 tlsconfig.go:240] "Starting DynamicServingCertificateController"
W0216 17:48:52.357963 1 configmapobserver.go:64] Cannot get the configuration config map: configmaps "insights-config" not found. Default configuration is used.
I0216 17:48:52.357984 1 secretconfigobserver.go:216] Legacy configuration set: enabled=false endpoint=https://console.redhat.com/api/ingress/v1/upload conditional_gatherer_endpoint=https://console.redhat.com/api/gathering/v2/%s/gathering_rules interval=2h0m0s token=false reportEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports initialPollingDelay=1m0s minRetryTime=30s pollingTimeout=50m0s processingStatusEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/status
I0216 17:48:52.358077 1 base_controller.go:67] Waiting for caches to sync for ConfigController
I0216 17:48:52.365163 1 secretconfigobserver.go:249] Found cloud.openshift.com token
I0216 17:48:52.365181 1 secretconfigobserver.go:204] Legacy configuration updated: enabled=true endpoint=https://console.redhat.com/api/ingress/v1/upload conditional_gatherer_endpoint=https://console.redhat.com/api/gathering/v2/%s/gathering_rules interval=2h0m0s token=true reportEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports initialPollingDelay=1m0s minRetryTime=30s pollingTimeout=50m0s processingStatusEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/status
I0216 17:48:52.371457 1 secretconfigobserver.go:119] support secret does not exist
I0216 17:48:52.381050 1 secretconfigobserver.go:249] Found cloud.openshift.com token
I0216 17:48:52.388138 1 secretconfigobserver.go:119] support secret does not exist
I0216 17:48:52.392505 1 recorder.go:161] Pruning old reports every 6h7m54s, max age is 288h0m0s
I0216 17:48:52.406345 1 controllerstatus.go:80] name=insightsuploader healthy=true reason= message=
I0216 17:48:52.406352 1 controllerstatus.go:80] name=insightsreport healthy=true reason= message=
I0216 17:48:52.406366 1 insightsreport.go:296] Starting report retriever
I0216 17:48:52.406371 1 insightsuploader.go:86] Reporting status periodically to https://console.redhat.com/api/ingress/v1/upload every 2h0m0s, starting in 1m30s
I0216 17:48:52.406374 1 insightsreport.go:298] Insights analysis reports will be downloaded from the https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports endpoint with a delay of 1m0s
I0216 17:48:52.406351 1 periodic.go:214] Running clusterconfig gatherer
I0216 17:48:52.406434 1 tasks_processing.go:45] number of workers: 64
I0216 17:48:52.406469 1 tasks_processing.go:69] worker 2 listening for tasks.
I0216 17:48:52.406476 1 tasks_processing.go:69] worker 0 listening for tasks.
I0216 17:48:52.406484 1 tasks_processing.go:69] worker 1 listening for tasks.
I0216 17:48:52.406485 1 tasks_processing.go:69] worker 24 listening for tasks.
I0216 17:48:52.406487 1 tasks_processing.go:69] worker 16 listening for tasks.
I0216 17:48:52.406490 1 tasks_processing.go:69] worker 17 listening for tasks.
I0216 17:48:52.406492 1 tasks_processing.go:69] worker 9 listening for tasks.
I0216 17:48:52.406493 1 tasks_processing.go:69] worker 10 listening for tasks.
I0216 17:48:52.406496 1 tasks_processing.go:69] worker 21 listening for tasks.
I0216 17:48:52.406498 1 tasks_processing.go:69] worker 11 listening for tasks.
I0216 17:48:52.406501 1 tasks_processing.go:69] worker 22 listening for tasks.
I0216 17:48:52.406505 1 tasks_processing.go:71] worker 11 working on tsdb_status task.
I0216 17:48:52.406505 1 tasks_processing.go:71] worker 21 working on machine_sets task.
I0216 17:48:52.406508 1 tasks_processing.go:69] worker 23 listening for tasks.
I0216 17:48:52.406507 1 tasks_processing.go:69] worker 20 listening for tasks.
I0216 17:48:52.406512 1 tasks_processing.go:69] worker 5 listening for tasks.
I0216 17:48:52.406513 1 tasks_processing.go:69] worker 48 listening for tasks.
I0216 17:48:52.406517 1 tasks_processing.go:69] worker 25 listening for tasks.
I0216 17:48:52.406516 1 tasks_processing.go:69] worker 18 listening for tasks.
I0216 17:48:52.406521 1 tasks_processing.go:69] worker 19 listening for tasks.
I0216 17:48:52.406524 1 tasks_processing.go:69] worker 6 listening for tasks.
I0216 17:48:52.406522 1 tasks_processing.go:69] worker 14 listening for tasks.
I0216 17:48:52.406528 1 tasks_processing.go:69] worker 15 listening for tasks.
I0216 17:48:52.406529 1 tasks_processing.go:69] worker 27 listening for tasks.
I0216 17:48:52.406531 1 tasks_processing.go:69] worker 7 listening for tasks.
I0216 17:48:52.406525 1 tasks_processing.go:69] worker 26 listening for tasks.
I0216 17:48:52.406534 1 tasks_processing.go:69] worker 12 listening for tasks.
I0216 17:48:52.406535 1 tasks_processing.go:69] worker 28 listening for tasks.
I0216 17:48:52.406538 1 tasks_processing.go:69] worker 51 listening for tasks.
W0216 17:48:52.406537 1 gather_prometheus_tsdb_status.go:38] Unable to load metrics client, tsdb status cannot be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 17:48:52.406541 1 tasks_processing.go:69] worker 13 listening for tasks.
I0216 17:48:52.406538 1 tasks_processing.go:69] worker 8 listening for tasks.
I0216 17:48:52.406544 1 tasks_processing.go:69] worker 29 listening for tasks.
I0216 17:48:52.406539 1 tasks_processing.go:69] worker 60 listening for tasks.
I0216 17:48:52.406543 1 tasks_processing.go:69] worker 49 listening for tasks.
I0216 17:48:52.406547 1 tasks_processing.go:69] worker 50 listening for tasks.
I0216 17:48:52.406550 1 tasks_processing.go:69] worker 30 listening for tasks.
I0216 17:48:52.406549 1 tasks_processing.go:69] worker 52 listening for tasks.
I0216 17:48:52.406550 1 tasks_processing.go:69] worker 55 listening for tasks.
I0216 17:48:52.406549 1 gather.go:180] gatherer "clusterconfig" function "tsdb_status" took 34.974µs to process 0 records
I0216 17:48:52.406557 1 tasks_processing.go:69] worker 58 listening for tasks.
I0216 17:48:52.406563 1 tasks_processing.go:69] worker 63 listening for tasks.
I0216 17:48:52.406564 1 tasks_processing.go:69] worker 31 listening for tasks.
I0216 17:48:52.406564 1 tasks_processing.go:71] worker 55 working on openshift_machine_api_events task.
I0216 17:48:52.406565 1 tasks_processing.go:69] worker 53 listening for tasks.
I0216 17:48:52.406569 1 tasks_processing.go:71] worker 0 working on support_secret task.
I0216 17:48:52.406571 1 tasks_processing.go:69] worker 62 listening for tasks.
I0216 17:48:52.406564 1 tasks_processing.go:71] worker 2 working on olm_operators task.
I0216 17:48:52.406575 1 tasks_processing.go:71] worker 17 working on certificate_signing_requests task.
I0216 17:48:52.406576 1 tasks_processing.go:69] worker 43 listening for tasks.
I0216 17:48:52.406556 1 tasks_processing.go:69] worker 39 listening for tasks.
I0216 17:48:52.406554 1 tasks_processing.go:69] worker 54 listening for tasks.
I0216 17:48:52.406578 1 tasks_processing.go:71] worker 7 working on nodenetworkconfigurationpolicies task.
I0216 17:48:52.406585 1 tasks_processing.go:71] worker 39 working on sap_pods task.
I0216 17:48:52.406557 1 tasks_processing.go:69] worker 40 listening for tasks.
I0216 17:48:52.406588 1 tasks_processing.go:69] worker 56 listening for tasks.
I0216 17:48:52.406552 1 tasks_processing.go:71] worker 1 working on image task.
I0216 17:48:52.406584 1 tasks_processing.go:69] worker 32 listening for tasks.
I0216 17:48:52.406590 1 tasks_processing.go:69] worker 33 listening for tasks.
I0216 17:48:52.406603 1 tasks_processing.go:71] worker 32 working on operators_pods_and_events task.
I0216 17:48:52.406771 1 tasks_processing.go:69] worker 36 listening for tasks.
I0216 17:48:52.406800 1 tasks_processing.go:71] worker 33 working on image_pruners task.
I0216 17:48:52.406866 1 tasks_processing.go:71] worker 49 working on image_registries task.
I0216 17:48:52.406927 1 tasks_processing.go:69] worker 37 listening for tasks.
I0216 17:48:52.406959 1 tasks_processing.go:71] worker 37 working on silenced_alerts task.
W0216 17:48:52.407001 1 gather_silenced_alerts.go:38] Unable to load alerts client, no alerts will be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 17:48:52.407074 1 tasks_processing.go:71] worker 9 working on dvo_metrics task.
I0216 17:48:52.407111 1 gather.go:180] gatherer "clusterconfig" function "silenced_alerts" took 42.898µs to process 0 records
I0216 17:48:52.407236 1 tasks_processing.go:69] worker 38 listening for tasks.
I0216 17:48:52.406563 1 tasks_processing.go:69] worker 41 listening for tasks.
I0216 17:48:52.406570 1 tasks_processing.go:69] worker 42 listening for tasks.
I0216 17:48:52.406570 1 tasks_processing.go:71] worker 63 working on operators task.
I0216 17:48:52.406571 1 tasks_processing.go:71] worker 31 working on node_logs task.
I0216 17:48:52.407370 1 tasks_processing.go:71] worker 15 working on nodenetworkstates task.
I0216 17:48:52.407408 1 tasks_processing.go:71] worker 19 working on openstack_version task.
I0216 17:48:52.407493 1 tasks_processing.go:71] worker 20 working on jaegers task.
I0216 17:48:52.407561 1 tasks_processing.go:69] worker 45 listening for tasks.
I0216 17:48:52.407675 1 tasks_processing.go:71] worker 51 working on number_of_pods_and_netnamespaces_with_sdn_annotations task.
I0216 17:48:52.407729 1 tasks_processing.go:71] worker 52 working on sap_license_management_logs task.
I0216 17:48:52.407751 1 tasks_processing.go:71] worker 24 working on sap_config task.
I0216 17:48:52.407786 1 tasks_processing.go:71] worker 12 working on mutating_webhook_configurations task.
I0216 17:48:52.407813 1 tasks_processing.go:71] worker 14 working on openshift_apiserver_operator_logs task.
I0216 17:48:52.407411 1 tasks_processing.go:71] worker 23 working on validating_webhook_configurations task.
I0216 17:48:52.407795 1 tasks_processing.go:71] worker 48 working on ingress_certificates task.
I0216 17:48:52.407738 1 tasks_processing.go:69] worker 47 listening for tasks.
I0216 17:48:52.407504 1 tasks_processing.go:69] worker 44 listening for tasks.
I0216 17:48:52.406581 1 tasks_processing.go:71] worker 43 working on container_images task.
I0216 17:48:52.407687 1 tasks_processing.go:71] worker 5 working on openstack_dataplanedeployments task.
I0216 17:48:52.406592 1 tasks_processing.go:71] worker 40 working on metrics task.
I0216 17:48:52.407825 1 tasks_processing.go:71] worker 25 working on cost_management_metrics_configs task.
I0216 17:48:52.406605 1 tasks_processing.go:69] worker 35 listening for tasks.
I0216 17:48:52.408150 1 tasks_processing.go:71] worker 27 working on openshift_logging task.
I0216 17:48:52.408159 1 tasks_processing.go:71] worker 35 working on crds task.
I0216 17:48:52.406587 1 tasks_processing.go:71] worker 54 working on version task.
I0216 17:48:52.406577 1 tasks_processing.go:69] worker 59 listening for tasks.
I0216 17:48:52.406806 1 tasks_processing.go:71] worker 60 working on openshift_authentication_logs task.
I0216 17:48:52.408223 1 tasks_processing.go:71] worker 42 working on machines task.
I0216 17:48:52.407369 1 tasks_processing.go:71] worker 50 working on scheduler_logs task.
I0216 17:48:52.406694 1 tasks_processing.go:71] worker 8 working on kube_controller_manager_logs task.
I0216 17:48:52.406597 1 tasks_processing.go:71] worker 56 working on ceph_cluster task.
I0216 17:48:52.406598 1 tasks_processing.go:69] worker 34 listening for tasks.
I0216 17:48:52.406499 1 tasks_processing.go:69] worker 3 listening for tasks.
I0216 17:48:52.406565 1 tasks_processing.go:69] worker 57 listening for tasks.
I0216 17:48:52.406798 1 tasks_processing.go:71] worker 29 working on lokistack task.
I0216 17:48:52.406808 1 tasks_processing.go:71] worker 36 working on authentication task.
I0216 17:48:52.407324 1 tasks_processing.go:71] worker 22 working on machine_config_pools task.
I0216 17:48:52.407422 1 tasks_processing.go:69] worker 46 listening for tasks.
I0216 17:48:52.406559 1 tasks_processing.go:69] worker 61 listening for tasks.
I0216 17:48:52.407806 1 tasks_processing.go:71] worker 6 working on ingress task.
I0216 17:48:52.406578 1 tasks_processing.go:71] worker 62 working on aggregated_monitoring_cr_names task.
I0216 17:48:52.407951 1 tasks_processing.go:71] worker 10 working on proxies task.
I0216 17:48:52.406505 1 tasks_processing.go:69] worker 4 listening for tasks.
I0216 17:48:52.406565 1 tasks_processing.go:71] worker 58 working on machine_healthchecks task.
I0216 17:48:52.407957 1 tasks_processing.go:71] worker 44 working on sap_datahubs task.
I0216 17:48:52.407957 1 tasks_processing.go:71] worker 30 working on active_alerts task.
I0216 17:48:52.407962 1 tasks_processing.go:71] worker 37 working on qemu_kubevirt_launcher_logs task.
I0216 17:48:52.407964 1 tasks_processing.go:71] worker 18 working on install_plans task.
I0216 17:48:52.407970 1 tasks_processing.go:71] worker 45 working on feature_gates task.
I0216 17:48:52.407976 1 tasks_processing.go:71] worker 47 working on pdbs task.
I0216 17:48:52.408803 1 tasks_processing.go:71] worker 11 working on storage_cluster task.
I0216 17:48:52.407976 1 tasks_processing.go:71] worker 38 working on machine_configs task.
I0216 17:48:52.408822 1 tasks_processing.go:71] worker 59 working on openstack_controlplanes task.
I0216 17:48:52.408835 1 tasks_processing.go:71] worker 26 working on openstack_dataplanenodesets task.
W0216 17:48:52.408526 1 gather_active_alerts.go:54] Unable to load alerts client, no alerts will be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 17:48:52.408781 1 tasks_processing.go:71] worker 28 working on clusterroles task.
I0216 17:48:52.408857 1 gather.go:180] gatherer "clusterconfig" function "active_alerts" took 329.635µs to process 0 records
I0216 17:48:52.408850 1 tasks_processing.go:71] worker 30 working on pod_network_connectivity_checks task.
W0216 17:48:52.408907 1 gather_most_recent_metrics.go:64] Unable to load metrics client, no metrics will be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 17:48:52.408921 1 tasks_processing.go:71] worker 40 working on infrastructures task.
I0216 17:48:52.408935 1 gather.go:180] gatherer "clusterconfig" function "metrics" took 911.364µs to process 0 records
I0216 17:48:52.409000 1 tasks_processing.go:71] worker 41 working on container_runtime_configs task.
I0216 17:48:52.409022 1 tasks_processing.go:71] worker 16 working on cluster_apiserver task.
I0216 17:48:52.409072 1 tasks_processing.go:71] worker 13 working on oauths task.
I0216 17:48:52.409088 1 tasks_processing.go:71] worker 53 working on config_maps task.
I0216 17:48:52.409312 1 tasks_processing.go:71] worker 34 working on machine_autoscalers task.
I0216 17:48:52.409332 1 tasks_processing.go:71] worker 3 working on nodes task.
I0216 17:48:52.409366 1 tasks_processing.go:71] worker 57 working on service_accounts task.
I0216 17:48:52.409535 1 tasks_processing.go:71] worker 46 working on overlapping_namespace_uids task.
I0216 17:48:52.409611 1 tasks_processing.go:71] worker 61 working on storage_classes task.
I0216 17:48:52.409826 1 tasks_processing.go:71] worker 4 working on schedulers task.
I0216 17:48:52.436150 1 controller.go:119] Initializing last reported time to 0001-01-01T00:00:00Z
I0216 17:48:52.436168 1 controller.go:203] Source periodic-clusterconfig *controllerstatus.Simple is not ready
I0216 17:48:52.436173 1 controller.go:203] Source periodic-conditional *controllerstatus.Simple is not ready
I0216 17:48:52.436176 1 controller.go:203] Source periodic-workloads *controllerstatus.Simple is not ready
I0216 17:48:52.436191 1 controller.go:457] The operator is still being initialized
I0216 17:48:52.436197 1 controller.go:482] The operator is healthy
I0216 17:48:52.449974 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file
I0216 17:48:52.449998 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file
I0216 17:48:52.450010 1 shared_informer.go:318] Caches are synced for RequestHeaderAuthRequestController
I0216 17:48:52.458125 1 base_controller.go:73] Caches are synced for ConfigController
I0216 17:48:52.458136 1 base_controller.go:110] Starting #1 worker of ConfigController controller ...
I0216 17:48:52.458144 1 tasks_processing.go:71] worker 1 working on monitoring_persistent_volumes task.
I0216 17:48:52.458235 1 recorder.go:75] Recording config/image with fingerprint=e217d72d337e4c9f3dbe7a4b07bb62e184df336218596d33e358a6a0ae04675e
I0216 17:48:52.458254 1 gather.go:180] gatherer "clusterconfig" function "image" took 51.543652ms to process 1 records
I0216 17:48:52.458340 1 tasks_processing.go:71] worker 33 working on networks task.
I0216 17:48:52.458458 1 recorder.go:75] Recording config/clusteroperator/imageregistry.operator.openshift.io/imagepruner/cluster with fingerprint=afb106e68f165d48457b72abc61dafdafe5ae24ad5424e86f12da807f2c636d5
I0216 17:48:52.458466 1 gather.go:180] gatherer "clusterconfig" function "image_pruners" took 51.297439ms to process 1 records
I0216 17:48:52.458477 1 gather.go:180] gatherer "clusterconfig" function "olm_operators" took 51.60874ms to process 0 records
I0216 17:48:52.458481 1 tasks_processing.go:74] worker 2 stopped.
W0216 17:48:52.461114 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again.
I0216 17:48:52.463936 1 tasks_processing.go:74] worker 27 stopped.
I0216 17:48:52.463946 1 gather.go:180] gatherer "clusterconfig" function "openshift_logging" took 55.773183ms to process 0 records
I0216 17:48:52.463955 1 tasks_processing.go:74] worker 11 stopped.
I0216 17:48:52.463965 1 gather.go:180] gatherer "clusterconfig" function "storage_cluster" took 55.144101ms to process 0 records
E0216 17:48:52.463976 1 gather.go:143] gatherer "clusterconfig" function "machine_healthchecks" failed with the error: machinehealthchecks.machine.openshift.io is forbidden: User "system:serviceaccount:openshift-insights:gather" cannot list resource "machinehealthchecks" in API group "machine.openshift.io" at the cluster scope
I0216 17:48:52.463984 1 gather.go:180] gatherer "clusterconfig" function "machine_healthchecks" took 55.4589ms to process 0 records
I0216 17:48:52.463992 1 tasks_processing.go:74] worker 58 stopped.
I0216 17:48:52.480141 1 tasks_processing.go:74] worker 20 stopped.
I0216 17:48:52.480154 1 gather.go:180] gatherer "clusterconfig" function "jaegers" took 72.628441ms to process 0 records
I0216 17:48:52.480161 1 gather.go:180] gatherer "clusterconfig" function "openstack_version" took 72.697225ms to process 0 records
I0216 17:48:52.480166 1 gather.go:180] gatherer "clusterconfig" function "sap_config" took 72.382557ms to process 0 records
I0216 17:48:52.480170 1 tasks_processing.go:74] worker 24 stopped.
I0216 17:48:52.480179 1 tasks_processing.go:74] worker 7 stopped.
I0216 17:48:52.480189 1 gather.go:180] gatherer "clusterconfig" function "nodenetworkconfigurationpolicies" took 73.589299ms to process 0 records
I0216 17:48:52.480194 1 gather.go:180] gatherer "clusterconfig" function "machine_configs" took 71.356827ms to process 0 records
I0216 17:48:52.480198 1 gather.go:180] gatherer "clusterconfig" function "openstack_dataplanedeployments" took 72.21264ms to process 0 records
I0216 17:48:52.480202 1 gather.go:180] gatherer "clusterconfig" function "openstack_controlplanes" took 71.363245ms to process 0 records
I0216 17:48:52.480204 1 tasks_processing.go:74] worker 38 stopped.
I0216 17:48:52.480207 1 gather.go:180] gatherer "clusterconfig" function "openstack_dataplanenodesets" took 71.349286ms to process 0 records
I0216 17:48:52.480207 1 tasks_processing.go:74] worker 5 stopped.
I0216 17:48:52.480210 1 tasks_processing.go:74] worker 59 stopped.
I0216 17:48:52.480213 1 gather.go:180] gatherer "clusterconfig" function "sap_datahubs" took 71.69862ms to process 0 records
I0216 17:48:52.480215 1 tasks_processing.go:74] worker 26 stopped.
I0216 17:48:52.480218 1 tasks_processing.go:74] worker 44 stopped.
E0216 17:48:52.480222 1 gather.go:143] gatherer "clusterconfig" function "pod_network_connectivity_checks" failed with the error: the server could not find the requested resource (get podnetworkconnectivitychecks.controlplane.operator.openshift.io)
I0216 17:48:52.480228 1 gather.go:180] gatherer "clusterconfig" function "pod_network_connectivity_checks" took 71.333748ms to process 0 records
I0216 17:48:52.480231 1 gather_sap_vsystem_iptables_logs.go:60] SAP resources weren't found
I0216 17:48:52.480229 1 tasks_processing.go:74] worker 19 stopped.
I0216 17:48:52.480237 1 tasks_processing.go:74] worker 30 stopped.
I0216 17:48:52.480241 1 tasks_processing.go:74] worker 25 stopped.
I0216 17:48:52.480237 1 gather.go:180] gatherer "clusterconfig" function "cost_management_metrics_configs" took 72.192732ms to process 0 records
I0216 17:48:52.480247 1 gather.go:180] gatherer "clusterconfig" function "machine_config_pools" took 71.843922ms to process 0 records
I0216 17:48:52.480251 1 gather.go:180] gatherer "clusterconfig" function "nodenetworkstates" took 72.80485ms to process 0 records
I0216 17:48:52.480254 1 gather.go:180] gatherer "clusterconfig" function "container_runtime_configs" took 71.212324ms to process 0 records
I0216 17:48:52.480257 1 gather.go:180] gatherer "clusterconfig" function "machine_sets" took 73.711142ms to process 0 records
I0216 17:48:52.480260 1 gather.go:180] gatherer "clusterconfig" function "sap_license_management_logs" took 72.482012ms to process 0 records
I0216 17:48:52.480263 1 tasks_processing.go:74] worker 52 stopped.
I0216 17:48:52.480266 1 tasks_processing.go:74] worker 22 stopped.
I0216 17:48:52.480268 1 tasks_processing.go:74] worker 15 stopped.
I0216 17:48:52.480270 1 tasks_processing.go:74] worker 41 stopped.
I0216 17:48:52.480272 1 tasks_processing.go:74] worker 21 stopped.
I0216 17:48:52.493864 1 tasks_processing.go:74] worker 42 stopped.
E0216 17:48:52.493876 1 gather.go:143] gatherer "clusterconfig" function "machines" failed with the error: machines.machine.openshift.io is forbidden: User "system:serviceaccount:openshift-insights:gather" cannot list resource "machines" in API group "machine.openshift.io" at the cluster scope
I0216 17:48:52.493883 1 gather.go:180] gatherer "clusterconfig" function "machines" took 85.627846ms to process 0 records
I0216 17:48:52.497043 1 tasks_processing.go:74] worker 31 stopped.
I0216 17:48:52.497058 1 gather.go:180] gatherer "clusterconfig" function "node_logs" took 89.70314ms to process 0 records
I0216 17:48:52.497063 1 gather.go:180] gatherer "clusterconfig" function "sap_pods" took 90.460285ms to process 0 records
I0216 17:48:52.497067 1 tasks_processing.go:74] worker 39 stopped.
I0216 17:48:52.499877 1 gather_logs.go:145] no pods in openshift-apiserver-operator namespace were found
I0216 17:48:52.499889 1 tasks_processing.go:74] worker 14 stopped.
I0216 17:48:52.499895 1 gather.go:180] gatherer "clusterconfig" function "openshift_apiserver_operator_logs" took 92.042514ms to process 0 records
I0216 17:48:52.501279 1 tasks_processing.go:74] worker 17 stopped.
I0216 17:48:52.501287 1 gather.go:180] gatherer "clusterconfig" function "certificate_signing_requests" took 94.693126ms to process 0 records
I0216 17:48:52.504075 1 tasks_processing.go:74] worker 10 stopped.
I0216 17:48:52.504124 1 recorder.go:75] Recording config/proxy with fingerprint=1b1a6e35475a2dfbe0d04fdf8d7ac0609c57469968f86833d4f74773f637ccbc
I0216 17:48:52.504133 1 gather.go:180] gatherer "clusterconfig" function "proxies" took 95.610985ms to process 1 records
I0216 17:48:52.509205 1 tasks_processing.go:74] worker 46 stopped.
I0216 17:48:52.509231 1 recorder.go:75] Recording config/namespaces_with_overlapping_uids with fingerprint=4f53cda18c2baa0c0354bb5f9a3ecbe5ed12ab4d8e11ba873c2f11161202b945
I0216 17:48:52.509245 1 gather.go:180] gatherer "clusterconfig" function "overlapping_namespace_uids" took 99.655761ms to process 1 records
I0216 17:48:52.540908 1 tasks_processing.go:74] worker 34 stopped.
I0216 17:48:52.540921 1 gather.go:180] gatherer "clusterconfig" function "machine_autoscalers" took 131.582987ms to process 0 records
I0216 17:48:52.540938 1 tasks_processing.go:74] worker 29 stopped.
I0216 17:48:52.540947 1 gather.go:180] gatherer "clusterconfig" function "lokistack" took 132.622941ms to process 0 records
I0216 17:48:52.540958 1 tasks_processing.go:74] worker 55 stopped.
I0216 17:48:52.540966 1 gather.go:180] gatherer "clusterconfig" function "openshift_machine_api_events" took 134.387691ms to process 0 records
I0216 17:48:52.540976 1 tasks_processing.go:74] worker 12 stopped.
I0216 17:48:52.541113 1 recorder.go:75] Recording config/mutatingwebhookconfigurations/aws-pod-identity with fingerprint=203e927227a6450164d6eb8d85c21a2dec96cab2be605a2f67266cb629d43a68
I0216 17:48:52.541133 1 recorder.go:75] Recording config/mutatingwebhookconfigurations/sre-podimagespec-mutation with fingerprint=2119c9494edb2b2334b6c18d4ec7236be1cb0150b4cdf1b1c4376efc3c4db137
I0216 17:48:52.541146 1 recorder.go:75] Recording config/mutatingwebhookconfigurations/sre-service-mutation with fingerprint=440502bde1f1b755b25cd08dce71a11d158f61f1ab753c966bc25c12ec0981f6
I0216 17:48:52.541151 1 gather.go:180] gatherer "clusterconfig" function "mutating_webhook_configurations" took 133.15918ms to process 3 records
I0216 17:48:52.541387 1 tasks_processing.go:74] worker 49 stopped.
I0216 17:48:52.541640 1 recorder.go:75] Recording config/clusteroperator/imageregistry.operator.openshift.io/config/cluster with fingerprint=0d07fa941ce833e87498975a3bd31b8b13d1bed206bb63f1039b8e3da493044c
I0216 17:48:52.541650 1 gather.go:180] gatherer "clusterconfig" function "image_registries" took 134.476761ms to process 1 records
I0216 17:48:52.546989 1 tasks_processing.go:74] worker 56 stopped.
I0216 17:48:52.547001 1 gather.go:180] gatherer "clusterconfig" function "ceph_cluster" took 138.683247ms to process 0 records
I0216 17:48:52.547315 1 tasks_processing.go:74] worker 40 stopped.
I0216 17:48:52.547772 1 recorder.go:75] Recording config/infrastructure with fingerprint=178125440aebb217586d5952da593d3385320d021583acf233738ae0b88a7eb4
I0216 17:48:52.547785 1 gather.go:180] gatherer "clusterconfig" function "infrastructures" took 138.38703ms to process 1 records
I0216 17:48:52.547878 1 tasks_processing.go:74] worker 16 stopped.
I0216 17:48:52.547882 1 recorder.go:75] Recording config/apiserver with fingerprint=8a80a0cd15224e6686dc6b2110b3de164414a61c13d0ee169c408dc315f9698d
I0216 17:48:52.547891 1 gather.go:180] gatherer "clusterconfig" function "cluster_apiserver" took 138.306056ms to process 1 records
I0216 17:48:52.547979 1 tasks_processing.go:74] worker 36 stopped.
I0216 17:48:52.548030 1 recorder.go:75] Recording config/authentication with fingerprint=796d7d982a193b16d7c65edfb1e2a1fc450c1cd48bb2df4fdb6eb913c0f6c4cd
I0216 17:48:52.548038 1 gather.go:180] gatherer "clusterconfig" function "authentication" took 139.12121ms to process 1 records
I0216 17:48:52.549109 1 sca.go:98] Pulling SCA certificates from https://api.openshift.com/api/accounts_mgmt/v1/certificates. Next check is in 8h0m0s
I0216 17:48:52.549161 1 cluster_transfer.go:78] checking the availability of cluster transfer. Next check is in 12h0m0s
W0216 17:48:52.549200 1 operator.go:286] started
I0216 17:48:52.549215 1 base_controller.go:67] Waiting for caches to sync for LoggingSyncer
I0216 17:48:52.549354 1 tasks_processing.go:74] worker 4 stopped.
I0216 17:48:52.549468 1 recorder.go:75] Recording config/schedulers/cluster with fingerprint=faebb1ad2d1d0e12b00a56d9454891d6d4a4bf536fb64a26e47a672eb71499be
I0216 17:48:52.549485 1 gather.go:180] gatherer "clusterconfig" function "schedulers" took 139.517301ms to process 1 records
I0216 17:48:52.549602 1 tasks_processing.go:74] worker 6 stopped.
I0216 17:48:52.549770 1 recorder.go:75] Recording config/ingress with fingerprint=cccd30a5998c93e0efb371667d5b47b611085619b386ae35c151c6e38c2a2778
I0216 17:48:52.549787 1 gather.go:180] gatherer "clusterconfig" function "ingress" took 141.197039ms to process 1 records
I0216 17:48:52.549852 1 tasks_processing.go:74] worker 61 stopped.
I0216 17:48:52.549909 1 recorder.go:75] Recording config/storage/storageclasses/gp2-csi with fingerprint=d9da29cfcaacd2f513db91f0d9e868126d315f75665b8bdc131721c6c6905f8a
I0216 17:48:52.549925 1 recorder.go:75] Recording config/storage/storageclasses/gp3-csi with fingerprint=78441aac9cd4875b89018e4b8c90ca0bc6cfe1c180f002bfacefe97ef8e7150c
I0216 17:48:52.549930 1 gather.go:180] gatherer "clusterconfig" function "storage_classes" took 140.224459ms to process 2 records
I0216 17:48:52.551105 1 tasks_processing.go:74] worker 0 stopped.
E0216 17:48:52.551118 1 gather.go:143] gatherer "clusterconfig" function "support_secret" failed with the error: secrets "support" not found
I0216 17:48:52.551127 1 gather.go:180] gatherer "clusterconfig" function "support_secret" took 144.52864ms to process 0 records
I0216 17:48:52.551314 1 tasks_processing.go:74] worker 23 stopped.
I0216 17:48:52.551539 1 recorder.go:75] Recording config/validatingwebhookconfigurations/multus.openshift.io with fingerprint=78c13ff558127455f2f6d661229dfe9133bff28b9afb8431ebe94dbb55110d87
I0216 17:48:52.551629 1 recorder.go:75] Recording config/validatingwebhookconfigurations/network-node-identity.openshift.io with fingerprint=5d1f42caf99464269106567cadaf1c1abdfae585867dec732b2ea96d7584805c
I0216 17:48:52.551664 1 recorder.go:75] Recording config/validatingwebhookconfigurations/performance-addon-operator with fingerprint=fab053127ffe71a3ad6b310b5d620c916485b820ef342e367176bcf59785eefe
I0216 17:48:52.551709 1 recorder.go:75] Recording config/validatingwebhookconfigurations/snapshot.storage.k8s.io with fingerprint=31db8a2f0ad40d5c6936f3db12050241e4ebaf0d2bf1f3fba12608d8307ec959
I0216 17:48:52.551746 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-clusterrolebindings-validation with fingerprint=1e8d2d90bcfade8053c81afbb0db08c74bb1aada64b593d39c614ca0eaa500a1
I0216 17:48:52.551784 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-clusterroles-validation with fingerprint=ce3ff0279eaae769ef1d750d959cfa5ec7d449c27d59b8ee4b9a40e7b6084d82
I0216 17:48:52.551824 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-ingress-config-validation with fingerprint=60bb0b1c02474f053304bf60383cc3d4e8b2ccc342819a3553af594c0ea74fdb
I0216 17:48:52.551922 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-regular-user-validation with fingerprint=f151682c405d599089b8b450764714287a85836197e35021c74b83d9117c391f
I0216 17:48:52.551959 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-scc-validation with fingerprint=f8f641d5cc588b8ba4b7630f9b669ba6b8a6a19a14f3b9216b6f53e27cf75eb2
I0216 17:48:52.551982 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-serviceaccount-validation with fingerprint=94f2c6851dd6e43bff160a61134992f80cb6c736ca5bf67ea8f46d63aaf80c64
I0216 17:48:52.552001 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-techpreviewnoupgrade-validation with fingerprint=e8f9377bab1eab1800763f9e1ec8350b31976a428d05fa30e151ce6f32f1a6e5
I0216 17:48:52.552007 1 gather.go:180] gatherer "clusterconfig" function "validating_webhook_configurations" took 143.416991ms to process 11 records
I0216 17:48:52.552216 1 gather_logs.go:145] no pods in openshift-kube-scheduler namespace were found
I0216 17:48:52.552228 1 tasks_processing.go:74] worker 50 stopped.
I0216 17:48:52.552236 1 gather.go:180] gatherer "clusterconfig" function "scheduler_logs" took 143.991553ms to process 0 records
I0216 17:48:52.552493 1 tasks_processing.go:74] worker 45 stopped.
I0216 17:48:52.552173 1 configmapobserver.go:84] configmaps "insights-config" not found
I0216 17:48:52.552708 1 recorder.go:75] Recording config/featuregate with fingerprint=2e93220be1356814aa86b13bee02aa13d486e5da4059bedd7755e0c3de3b8160
I0216 17:48:52.552807 1 gather.go:180] gatherer "clusterconfig" function "feature_gates" took 143.721169ms to process 1 records
I0216 17:48:52.552905 1 tasks_processing.go:74] worker 47 stopped.
I0216 17:48:52.553271 1 recorder.go:75] Recording config/pdbs/openshift-image-registry/image-registry with fingerprint=e760fcda3bfeabb3011178aa49ab73f9827a0ba2fcdcf65ed94c1f167dd51d3b
I0216 17:48:52.553299 1 recorder.go:75] Recording config/pdbs/openshift-ingress/router-default with fingerprint=48656e91c9d877b0986e1d4d2b6dd4cfd412ba3a37a26374a34cabc81e1c5b41
I0216 17:48:52.553319 1 recorder.go:75] Recording config/pdbs/openshift-operator-lifecycle-manager/packageserver-pdb with fingerprint=3c4707b3672d7af0369b79f389f48d79b3516e84d92e6d51624dd8c0ac57f55f
I0216 17:48:52.553326 1 gather.go:180] gatherer "clusterconfig" function "pdbs" took 143.793106ms to process 3 records
I0216 17:48:52.553415 1 tasks_processing.go:74] worker 13 stopped.
I0216 17:48:52.553543 1 recorder.go:75] Recording config/oauth with fingerprint=8c45b04c24924bb732e6930c966e166b19b81ba357c36cdeb4dffd1b55fe2b6c
I0216 17:48:52.553553 1 gather.go:180] gatherer "clusterconfig" function "oauths" took 144.215907ms to process 1 records
I0216 17:48:52.567475 1 gather_logs.go:145] no pods in openshift-authentication namespace were found
I0216 17:48:52.567484 1 tasks_processing.go:74] worker 60 stopped.
I0216 17:48:52.567489 1 gather.go:180] gatherer "clusterconfig" function "openshift_authentication_logs" took 159.256035ms to process 0 records
I0216 17:48:52.576961 1 gather_logs.go:145] no pods in openshift-kube-controller-manager namespace were found
I0216 17:48:52.576975 1 tasks_processing.go:74] worker 8 stopped.
I0216 17:48:52.576983 1 gather.go:180] gatherer "clusterconfig" function "kube_controller_manager_logs" took 168.695609ms to process 0 records
I0216 17:48:52.577021 1 tasks_processing.go:74] worker 1 stopped.
I0216 17:48:52.577034 1 gather.go:180] gatherer "clusterconfig" function "monitoring_persistent_volumes" took 118.865134ms to process 0 records
I0216 17:48:52.577176 1 tasks_processing.go:74] worker 33 stopped.
I0216 17:48:52.577257 1 recorder.go:75] Recording config/network with fingerprint=3902603e2653615a4b50d138a5fa246f683f7f89c594c87505f1b8d065405e6f
I0216 17:48:52.577267 1 gather.go:180] gatherer "clusterconfig" function "networks" took 118.82322ms to process 1 records
I0216 17:48:52.577290 1 controller.go:203] Source periodic-clusterconfig *controllerstatus.Simple is not ready
I0216 17:48:52.577300 1 controller.go:203] Source periodic-conditional *controllerstatus.Simple is not ready
I0216 17:48:52.577303 1 controller.go:203] Source periodic-workloads *controllerstatus.Simple is not ready
I0216 17:48:52.577308 1 controller.go:203] Source scaController *sca.Controller is not ready
I0216 17:48:52.577312 1 controller.go:203] Source clusterTransferController *clustertransfer.Controller is not ready
I0216 17:48:52.577329 1 controller.go:457] The operator is still being initialized
I0216 17:48:52.577339 1 controller.go:482] The operator is healthy
I0216 17:48:52.578056 1 gather_logs.go:145] no pods in namespace were found
I0216 17:48:52.578069 1 tasks_processing.go:74] worker 37 stopped.
I0216 17:48:52.578076 1 gather.go:180] gatherer "clusterconfig" function "qemu_kubevirt_launcher_logs" took 169.530841ms to process 0 records
I0216 17:48:52.579587 1 requests.go:204] Asking for SCA certificate for x86_64 architecture
I0216 17:48:52.580505 1 tasks_processing.go:74] worker 28 stopped.
I0216 17:48:52.580661 1 recorder.go:75] Recording cluster-scoped-resources/rbac.authorization.k8s.io/clusterroles/admin with fingerprint=a0e4ea4d04ee4b89160794788bbcf80a82fa6d7537cc37e5e7e1d976374b9ad9
I0216 17:48:52.580714 1 recorder.go:75] Recording cluster-scoped-resources/rbac.authorization.k8s.io/clusterroles/edit with fingerprint=308ece2a98ba5e8fe0b327a75474edce34b3004a1d958cb9eb332d93d42a7a19
I0216 17:48:52.580722 1 gather.go:180] gatherer "clusterconfig" function "clusterroles" took 171.644567ms to process 2 records
I0216 17:48:52.580795 1 tasks_processing.go:74] worker 3 stopped.
I0216 17:48:52.581579 1 recorder.go:75] Recording config/node/ip-10-0-141-207.ec2.internal with fingerprint=870e721f519ac8528e95c38577635c1465b31b410f9b19cd0dbc930b5400c874
I0216 17:48:52.581728 1 recorder.go:75] Recording config/node/ip-10-0-151-109.ec2.internal with fingerprint=f8633c7e560afdfc2d24ea88610e7a98ecdf2934b595b0dbdccb3d846565a552
I0216 17:48:52.581821 1 recorder.go:75] Recording config/node/ip-10-0-160-207.ec2.internal with fingerprint=117d47c59c538e45a003803897b108273acf193aa8e9a79e5504c6263e0915a7
I0216 17:48:52.581833 1 gather.go:180] gatherer "clusterconfig" function "nodes" took 171.171906ms to process 3 records
I0216 17:48:52.581849 1 gather.go:180] gatherer "clusterconfig" function "aggregated_monitoring_cr_names" took 172.199777ms to process 0 records
I0216 17:48:52.581857 1 tasks_processing.go:74] worker 62 stopped.
W0216 17:48:52.583417 1 sca.go:117] Failed to pull SCA certs from https://api.openshift.com/api/accounts_mgmt/v1/certificates: unable to retrieve SCA certs data from https://api.openshift.com/api/accounts_mgmt/v1/certificates: Post "https://api.openshift.com/api/accounts_mgmt/v1/certificates": dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.11:43005->172.30.0.10:53: read: connection refused
I0216 17:48:52.583429 1 controllerstatus.go:80] name=scaController healthy=true reason=NonHTTPError message=Failed to pull SCA certs from https://api.openshift.com/api/accounts_mgmt/v1/certificates: unable to retrieve SCA certs data from https://api.openshift.com/api/accounts_mgmt/v1/certificates: Post "https://api.openshift.com/api/accounts_mgmt/v1/certificates": dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.11:43005->172.30.0.10:53: read: connection refused
E0216 17:48:52.583430 1 cluster_transfer.go:90] failed to pull cluster transfer: unable to retrieve cluster transfer data from https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/: Get "https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/?search=cluster_uuid+is+%!a(MISSING)2eac747-a8a6-4828-88e1-a1afa6a05a8e%!+(MISSING)and+status+is+%!a(MISSING)ccepted%!"(MISSING): dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.11:43005->172.30.0.10:53: read: connection refused
I0216 17:48:52.583457 1 controllerstatus.go:80] name=clusterTransferController healthy=true reason=Disconnected message=failed to pull cluster transfer: unable to retrieve cluster transfer data from https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/: Get "https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/?search=cluster_uuid+is+%27a2eac747-a8a6-4828-88e1-a1afa6a05a8e%27+and+status+is+%27accepted%27": dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.11:43005->172.30.0.10:53: read: connection refused
I0216 17:48:52.589562 1 tasks_processing.go:74] worker 54 stopped.
I0216 17:48:52.589715 1 recorder.go:75] Recording config/version with fingerprint=fa010693b3a0016e5c309ebeab271e7f17d64812c6efbb67833600448cb328cf
I0216 17:48:52.589728 1 recorder.go:75] Recording config/id with fingerprint=75868708d63208aa1d07d4be250397301428ff3d17f2c7c225800ea9fb7a8b76
I0216 17:48:52.589735 1 gather.go:180] gatherer "clusterconfig" function "version" took 181.341414ms to process 2 records
I0216 17:48:52.590885 1 tasks_processing.go:74] worker 35 stopped.
I0216 17:48:52.592696 1 recorder.go:75] Recording config/crd/volumesnapshots.snapshot.storage.k8s.io with fingerprint=7e85b3a83fd4d3466d50f5fea63a7674b309280a21427f5f0fe3b6b5d67d8c95
I0216 17:48:52.592843 1 recorder.go:75] Recording config/crd/volumesnapshotcontents.snapshot.storage.k8s.io with fingerprint=61a5315131017abd54c4a7a98fa08097a9cd91d3a6ac7f6b8e05f7065072b71e
I0216 17:48:52.592855 1 gather.go:180] gatherer "clusterconfig" function "crds" took 182.711474ms to process 2 records
I0216 17:48:52.593432 1 tasks_processing.go:74] worker 43 stopped.
I0216 17:48:52.594458 1 recorder.go:75] Recording config/pod/openshift-ovn-kubernetes/ovnkube-node-6c5bz with fingerprint=639578331c9f487a8e9de42c5db7812defdfd526223aeef695348293bb6e3b2f
I0216 17:48:52.594497 1 recorder.go:75] Recording config/running_containers with fingerprint=97a35ff7576882f5b1fd7205e5d074390cd8335d9dfb251ed91e7e8ec20fccbc
I0216 17:48:52.594505 1 gather.go:180] gatherer "clusterconfig" function "container_images" took 185.486806ms to process 2 records
I0216 17:48:52.596235 1 tasks_processing.go:74] worker 51 stopped.
I0216 17:48:52.596248 1 gather.go:180] gatherer "clusterconfig" function "number_of_pods_and_netnamespaces_with_sdn_annotations" took 188.524405ms to process 0 records
I0216 17:48:52.600559 1 prometheus_rules.go:88] Prometheus rules successfully created
I0216 17:48:52.613864 1 tasks_processing.go:74] worker 48 stopped.
E0216 17:48:52.613881 1 gather.go:143] gatherer "clusterconfig" function "ingress_certificates" failed with the error: failed to get secret 'router-certs-default' in namespace 'openshift-ingress': secrets "router-certs-default" not found
E0216 17:48:52.613889 1 gather.go:143] gatherer "clusterconfig" function "ingress_certificates" failed with the error: failed to get secret '2oggcojuj7h3t7khkrb9potmf7mhfikq-primary-cert-bundle-secret' in namespace 'openshift-ingress-operator': secrets "2oggcojuj7h3t7khkrb9potmf7mhfikq-primary-cert-bundle-secret" not found
I0216 17:48:52.613931 1 recorder.go:75] Recording aggregated/ingress_controllers_certs with fingerprint=227b908120c58a33cc6b1ad2f68d6285d40adcafb098a9ac9bcb3af8f53692cb
I0216 17:48:52.613941 1 gather.go:180] gatherer "clusterconfig" function "ingress_certificates" took 205.939712ms to process 1 records
I0216 17:48:52.629851 1 tasks_processing.go:74] worker 53 stopped.
E0216 17:48:52.629865 1 gather.go:143] gatherer "clusterconfig" function "config_maps" failed with the error: configmaps "cluster-monitoring-config" not found
E0216 17:48:52.629873 1 gather.go:143] gatherer "clusterconfig" function "config_maps" failed with the error: configmaps "gateway-mode-config" not found
E0216 17:48:52.629877 1 gather.go:143] gatherer "clusterconfig" function "config_maps" failed with the error: configmaps "insights-config" not found
I0216 17:48:52.629910 1 recorder.go:75] Recording config/configmaps/openshift-config/kube-root-ca.crt/ca.crt with fingerprint=d476c7d3f5b104863f08f481b1264dcc68cc272ecefb0ecb709b18a6afab034d
I0216 17:48:52.629921 1 recorder.go:75] Recording config/configmaps/openshift-config/openshift-install/invoker with fingerprint=76b482f683cd3ef9da02debac5b26080a5aeb06ff768ee5c21117514dff29d8a
I0216 17:48:52.629926 1 recorder.go:75] Recording config/configmaps/openshift-config/openshift-install/version with fingerprint=0bddb88b072029f25dde6f44cb877a44fb2f65ed4864939fbf7a3e42c0a485f6
I0216 17:48:52.629931 1 recorder.go:75] Recording config/configmaps/openshift-config/openshift-service-ca.crt/service-ca.crt with fingerprint=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
I0216 17:48:52.629952 1 recorder.go:75] Recording config/configmaps/openshift-config/rosa-brand-logo/rosa-brand-logo.svg with fingerprint=6ed8ca4dd7a8eee7249182bc006e9649ce84d76c551ddfaaa33e55d8c4cc1ed0
I0216 17:48:52.629961 1 recorder.go:75] Recording config/configmaps/kube-system/cluster-config-v1/install-config with fingerprint=ab3811c6b83fd7b8e920094cfa3080d1b4ee3c35ec4c8379437b21d27bd6608d
I0216 17:48:52.629967 1 gather.go:180] gatherer "clusterconfig" function "config_maps" took 220.749838ms to process 6 records
I0216 17:48:52.649390 1 base_controller.go:73] Caches are synced for LoggingSyncer
I0216 17:48:52.649400 1 base_controller.go:110] Starting #1 worker of LoggingSyncer controller ...
I0216 17:48:52.680145 1 gather_cluster_operators.go:184] Unable to get dnsrecords.ingress.operator.openshift.io resource due to: dnsrecords.ingress.operator.openshift.io "default" not found
I0216 17:48:52.960265 1 gather_cluster_operator_pods_and_events.go:119] Found 18 pods with 21 containers
I0216 17:48:52.960279 1 gather_cluster_operator_pods_and_events.go:233] Maximum buffer size: 1198372 bytes
I0216 17:48:52.960586 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns container dns-default-pqrks pod in namespace openshift-dns (previous: false).
I0216 17:48:53.189123 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-pqrks pod in namespace openshift-dns for failing operator dns (previous: false): "container \"dns\" in pod \"dns-default-pqrks\" is waiting to start: ContainerCreating"
I0216 17:48:53.189138 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"dns\" in pod \"dns-default-pqrks\" is waiting to start: ContainerCreating"
I0216 17:48:53.189143 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-rbac-proxy container dns-default-pqrks pod in namespace openshift-dns (previous: false).
I0216 17:48:53.366265 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-pqrks pod in namespace openshift-dns for failing operator kube-rbac-proxy (previous: false): "container \"kube-rbac-proxy\" in pod \"dns-default-pqrks\" is waiting to start: ContainerCreating"
I0216 17:48:53.366276 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"kube-rbac-proxy\" in pod \"dns-default-pqrks\" is waiting to start: ContainerCreating"
I0216 17:48:53.366285 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns container dns-default-qltgc pod in namespace openshift-dns (previous: false).
W0216 17:48:53.461348 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again.
I0216 17:48:53.602344 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-qltgc pod in namespace openshift-dns for failing operator dns (previous: false): "container \"dns\" in pod \"dns-default-qltgc\" is waiting to start: ContainerCreating"
I0216 17:48:53.602358 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"dns\" in pod \"dns-default-qltgc\" is waiting to start: ContainerCreating"
I0216 17:48:53.602364 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-rbac-proxy container dns-default-qltgc pod in namespace openshift-dns (previous: false).
I0216 17:48:53.769254 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-qltgc pod in namespace openshift-dns for failing operator kube-rbac-proxy (previous: false): "container \"kube-rbac-proxy\" in pod \"dns-default-qltgc\" is waiting to start: ContainerCreating"
I0216 17:48:53.769271 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"kube-rbac-proxy\" in pod \"dns-default-qltgc\" is waiting to start: ContainerCreating"
I0216 17:48:53.769282 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns container dns-default-s5q5v pod in namespace openshift-dns (previous: false).
I0216 17:48:53.802658 1 gather_cluster_operators.go:184] Unable to get configs.samples.operator.openshift.io resource due to: configs.samples.operator.openshift.io "cluster" not found
I0216 17:48:53.985259 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-s5q5v pod in namespace openshift-dns for failing operator dns (previous: false): "container \"dns\" in pod \"dns-default-s5q5v\" is waiting to start: ContainerCreating"
I0216 17:48:53.985270 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"dns\" in pod \"dns-default-s5q5v\" is waiting to start: ContainerCreating"
I0216 17:48:53.985276 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-rbac-proxy container dns-default-s5q5v pod in namespace openshift-dns (previous: false).
I0216 17:48:54.166543 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-s5q5v pod in namespace openshift-dns for failing operator kube-rbac-proxy (previous: false): "container \"kube-rbac-proxy\" in pod \"dns-default-s5q5v\" is waiting to start: ContainerCreating"
I0216 17:48:54.166558 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"kube-rbac-proxy\" in pod \"dns-default-s5q5v\" is waiting to start: ContainerCreating"
I0216 17:48:54.166565 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns-node-resolver container node-resolver-8kg54 pod in namespace openshift-dns (previous: false).
I0216 17:48:54.204237 1 tasks_processing.go:74] worker 63 stopped.
I0216 17:48:54.204288 1 recorder.go:75] Recording config/clusteroperator/console with fingerprint=82d3a5ae53a0df1a283d4893e0d1b783d526d8862c4dc90f72de98ba5899c0c1
I0216 17:48:54.204325 1 recorder.go:75] Recording config/clusteroperator/csi-snapshot-controller with fingerprint=9e65f357caf43f5635a74cf5a3635cd2cff37f40a0c50691d57b31449639b3d5
I0216 17:48:54.204366 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/csisnapshotcontroller/cluster with fingerprint=5adc514f4b63e2f1ecc68bf6f9c0af70c5eea04522a49524e102721b1c41f80e
I0216 17:48:54.204406 1 recorder.go:75] Recording config/clusteroperator/dns with fingerprint=3f24d4aa5138dc198e3b581b229707b0511eb4f7e9780975f3c7e28fc0b3374b
I0216 17:48:54.204432 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/dns/default with fingerprint=9e7b4ce029030d3d8c3b49af92c556acdcc415000b40d3f969dbdc42c432b47f
I0216 17:48:54.204472 1 recorder.go:75] Recording config/clusteroperator/image-registry with fingerprint=3e0c0ee65b466711a3479f50d037528252bd4a82dd0179cfa41ad018f7742494
I0216 17:48:54.204500 1 recorder.go:75] Recording config/clusteroperator/ingress with fingerprint=e63eb41241b7337910848e5268ead7e2351ad6204e591192feee2faa6a7f0917
I0216 17:48:54.204527 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/ingresscontroller/openshift-ingress-operator/default with fingerprint=2d91fe7dd764ab28bbe756249db284bd637f7ce2b5c3b78d2bad7548820e77e6
I0216 17:48:54.204546 1 recorder.go:75] Recording config/clusteroperator/insights with fingerprint=1fe1604ccf53d58db0cb62b94c0eb05bda06c6b8d37efc9726c3458b13899776
I0216 17:48:54.204567 1 recorder.go:75] Recording config/clusteroperator/kube-apiserver with fingerprint=7b2f93dc2f917015027a077a4432a94f9745208f0763a4e12efccdfbff27cc56
I0216 17:48:54.204579 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubeapiserver/cluster with fingerprint=51503bf0b784fcf65ea46bcaf1f72ac1a5c4d5dc211934f18f27871efed05762
I0216 17:48:54.204595 1 recorder.go:75] Recording config/clusteroperator/kube-controller-manager with fingerprint=4db2b84b3ce6859bbb734e268c5684af6f0f02120bf4e153339e66a23cb7b5d9
I0216 17:48:54.204608 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubecontrollermanager/cluster with fingerprint=ce90c0d4f367d7da085074268031798382ae7c54fdcb0a21f15a4818fe308c11
I0216 17:48:54.204624 1 recorder.go:75] Recording config/clusteroperator/kube-scheduler with fingerprint=3b923449aa50fabcec69a42f3e6ab8f3ab79dce23614a5ab614ae161e56e3dfa
I0216 17:48:54.204635 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubescheduler/cluster with fingerprint=f2940fb9fd20c19951dfc295eb363b7fba0c505f5ae61f01967a063099e6b60a
I0216 17:48:54.204655 1 recorder.go:75] Recording config/clusteroperator/kube-storage-version-migrator with fingerprint=819b0e91307804656d8f5daff8cae0ec03a0ec0727c86d096170d9848282851c
I0216 17:48:54.204667 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubestorageversionmigrator/cluster with fingerprint=9351181aa7e6ada41ef581ab31e13516c6b934cc95710154bafb2eb222cb58db
I0216 17:48:54.204683 1 recorder.go:75] Recording config/clusteroperator/monitoring with fingerprint=01ff1a8df29ab115dbdb4ee40c2213a021ac16318f83694f2d252d4726e9c8ec
I0216 17:48:54.204757 1 recorder.go:75] Recording config/clusteroperator/network with fingerprint=a2ae42bfb71a7acefbaf23279b1cf03cf411a31a1e6e033f7543943d1e49fab2
I0216 17:48:54.204769 1 recorder.go:75] Recording config/clusteroperator/network.operator.openshift.io/operatorpki/openshift-ovn-kubernetes/ovn with fingerprint=626a89d20e0deaed5b6dfb533acfe65f4bb1618bd200a703b62e60c5d16d94ab
I0216 17:48:54.204779 1 recorder.go:75] Recording config/clusteroperator/network.operator.openshift.io/operatorpki/openshift-ovn-kubernetes/signer with fingerprint=90410b16914712b85b3c4578716ad8c0ae072e688f4cd1e022bf76f20da3506d
I0216 17:48:54.204805 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/network/cluster with fingerprint=a93d15eaecb455a0e40ecb2826eeecc1533899204ddd3c3921d15ab70af7ae75
I0216 17:48:54.204827 1 recorder.go:75] Recording config/clusteroperator/node-tuning with fingerprint=f48e0e36045916430972d644243ee6ea5d5beb275e8b218e5c2cca08ef88cbb6
I0216 17:48:54.204849 1 recorder.go:75] Recording config/clusteroperator/openshift-apiserver with fingerprint=42ddd71957fa9ed6e3c1b24043bea1bcd18681a434a8e67e1e40c5440358b7dd
I0216 17:48:54.204866 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/openshiftapiserver/cluster with fingerprint=e712e6cf27339b441e4ed1f4cde91dbde7e952698ba93407e4457db63a4a4c76
I0216 17:48:54.204887 1 recorder.go:75] Recording config/clusteroperator/openshift-controller-manager with fingerprint=11977aaaef5a22d23116f74d688f6172d8cfdc524ee00171ca1c7957b6506c38
I0216 17:48:54.204899 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/openshiftcontrollermanager/cluster with fingerprint=d71a0f4672f9b45d9fc8293bf1687afc650fd28d32e2e30de27523fe7b4eadf7
I0216 17:48:54.204912 1 recorder.go:75] Recording config/clusteroperator/openshift-samples with fingerprint=ef0c0b8c52ee00572885808fac29c73c047ca2822240c260ca3c00fdbe24b49d
I0216 17:48:54.204929 1 recorder.go:75] Recording config/clusteroperator/operator-lifecycle-manager with fingerprint=a09eb90847fbd218e38fe8e3f54981b3b4d18c421588ea6c367f9aae49d10f65
I0216 17:48:54.204946 1 recorder.go:75] Recording config/clusteroperator/operator-lifecycle-manager-catalog with fingerprint=cc97b69bc499cb1883e98a5425014ea5191453ace7c2e2cf6fe34b8be45d557c
I0216 17:48:54.204968 1 recorder.go:75] Recording config/clusteroperator/operator-lifecycle-manager-packageserver with fingerprint=00656f2b0d372d19110f9dfbea7a9f48e0dc427f42974c8207c5b48eaa931849
I0216 17:48:54.204983 1 recorder.go:75] Recording config/clusteroperator/service-ca with fingerprint=4e9517f9671966e633be52c496209bf01483a3b7c9c9e07c5014437299a68860
I0216 17:48:54.205012 1 recorder.go:75] Recording config/clusteroperator/storage with fingerprint=b0ba64a28178fbe79d629fcd573a6100a827b533e170e9f148114e52fdfe66d9
I0216 17:48:54.205030 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/clustercsidriver/ebs.csi.aws.com with fingerprint=7e1ab8f8cfcd9d249b5b213939fe5144bb83db3725475461728bea44a002c3be
I0216 17:48:54.205042 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/storage/cluster with fingerprint=8e480f8c1ce1b39baac42d8ec780c57c2592929ae0c801b61ffad49ba13f33ad
I0216 17:48:54.205051 1 gather.go:180] gatherer "clusterconfig" function "operators" took 1.796978113s to process 35 records
I0216 17:48:54.366981 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 17:48:54.366992 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns-node-resolver container node-resolver-bsxg5 pod in namespace openshift-dns (previous: false).
W0216 17:48:54.461594 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics.
Trying again. I0216 17:48:54.567142 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty" I0216 17:48:54.567155 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns-node-resolver container node-resolver-jq6p5 pod in namespace openshift-dns (previous: false). I0216 17:48:54.766453 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty" I0216 17:48:54.766467 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for registry container image-registry-545879b8f-7fj4r pod in namespace openshift-image-registry (previous: false). I0216 17:48:54.970573 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for image-registry-545879b8f-7fj4r pod in namespace openshift-image-registry for failing operator registry (previous: false): "container \"registry\" in pod \"image-registry-545879b8f-7fj4r\" is waiting to start: ContainerCreating" I0216 17:48:54.970586 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"registry\" in pod \"image-registry-545879b8f-7fj4r\" is waiting to start: ContainerCreating" I0216 17:48:54.970596 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for registry container image-registry-545879b8f-dfqdr pod in namespace openshift-image-registry (previous: false). I0216 17:48:55.166119 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for image-registry-545879b8f-dfqdr pod in namespace openshift-image-registry for failing operator registry (previous: false): "container \"registry\" in pod \"image-registry-545879b8f-dfqdr\" is waiting to start: ContainerCreating" I0216 17:48:55.166134 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"registry\" in pod \"image-registry-545879b8f-dfqdr\" is waiting to start: ContainerCreating" I0216 17:48:55.166142 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for registry container image-registry-7b6774c6f4-pjl8r pod in namespace openshift-image-registry (previous: false). I0216 17:48:55.367151 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for image-registry-7b6774c6f4-pjl8r pod in namespace openshift-image-registry for failing operator registry (previous: false): "container \"registry\" in pod \"image-registry-7b6774c6f4-pjl8r\" is waiting to start: ContainerCreating" I0216 17:48:55.367162 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"registry\" in pod \"image-registry-7b6774c6f4-pjl8r\" is waiting to start: ContainerCreating" I0216 17:48:55.367169 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for node-ca container node-ca-9p492 pod in namespace openshift-image-registry (previous: false). W0216 17:48:55.461276 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again. I0216 17:48:55.567081 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty" I0216 17:48:55.567092 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for node-ca container node-ca-jtchj pod in namespace openshift-image-registry (previous: false). I0216 17:48:55.766977 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty" I0216 17:48:55.766988 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for node-ca container node-ca-v7j4n pod in namespace openshift-image-registry (previous: false). 
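Editor's note: the gather_dvo_metrics warnings recur roughly once per second until the gatherer gives up ("was not available within the 5s timeout: context deadline exceeded" further down). Below is a stdlib-only sketch of that polling pattern, assuming a plain HTTP GET against the endpoint named later in the log and a 5-second budget; it is illustrative, not the operator's implementation.

package main

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"
)

// pollMetrics retries pollOnce every interval until it succeeds or the context deadline expires.
func pollMetrics(ctx context.Context, url string, interval time.Duration) ([]byte, error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		body, err := pollOnce(ctx, url)
		if err == nil {
			return body, nil
		}
		fmt.Println("failed to read the metrics, trying again:", err)
		select {
		case <-ctx.Done():
			return nil, errors.New("metrics service was not available within the timeout: " + ctx.Err().Error())
		case <-ticker.C:
		}
	}
}

// pollOnce performs a single GET and returns the response body on HTTP 200.
func pollOnce(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return io.ReadAll(resp.Body)
}

func main() {
	// 5s budget and once-per-second retries, matching the cadence and timeout seen in the log.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := pollMetrics(ctx, "http://deployment-validation-operator-metrics.openshift-deployment-validation-operator.svc:8383", time.Second)
	fmt.Println(err)
}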
I0216 17:48:55.967152 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty" I0216 17:48:55.967164 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for router container router-default-8799b7d4b-c47jl pod in namespace openshift-ingress (previous: false). I0216 17:48:56.166633 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for router-default-8799b7d4b-c47jl pod in namespace openshift-ingress for failing operator router (previous: false): "container \"router\" in pod \"router-default-8799b7d4b-c47jl\" is waiting to start: ContainerCreating" I0216 17:48:56.166644 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"router\" in pod \"router-default-8799b7d4b-c47jl\" is waiting to start: ContainerCreating" I0216 17:48:56.166652 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for router container router-default-8799b7d4b-vwmg6 pod in namespace openshift-ingress (previous: false). I0216 17:48:56.367029 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for router-default-8799b7d4b-vwmg6 pod in namespace openshift-ingress for failing operator router (previous: false): "container \"router\" in pod \"router-default-8799b7d4b-vwmg6\" is waiting to start: ContainerCreating" I0216 17:48:56.367040 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"router\" in pod \"router-default-8799b7d4b-vwmg6\" is waiting to start: ContainerCreating" I0216 17:48:56.367048 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for router container router-default-d4c4955b9-zqlks pod in namespace openshift-ingress (previous: false). W0216 17:48:56.460908 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again. I0216 17:48:56.568141 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for router-default-d4c4955b9-zqlks pod in namespace openshift-ingress for failing operator router (previous: false): "container \"router\" in pod \"router-default-d4c4955b9-zqlks\" is waiting to start: ContainerCreating" I0216 17:48:56.568152 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"router\" in pod \"router-default-d4c4955b9-zqlks\" is waiting to start: ContainerCreating" I0216 17:48:56.568159 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for serve-healthcheck-canary container ingress-canary-4cmmk pod in namespace openshift-ingress-canary (previous: false). I0216 17:48:56.767238 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for ingress-canary-4cmmk pod in namespace openshift-ingress-canary for failing operator serve-healthcheck-canary (previous: false): "container \"serve-healthcheck-canary\" in pod \"ingress-canary-4cmmk\" is waiting to start: ContainerCreating" I0216 17:48:56.767249 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"serve-healthcheck-canary\" in pod \"ingress-canary-4cmmk\" is waiting to start: ContainerCreating" I0216 17:48:56.767255 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for serve-healthcheck-canary container ingress-canary-chstv pod in namespace openshift-ingress-canary (previous: false). 
I0216 17:48:56.967326 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for ingress-canary-chstv pod in namespace openshift-ingress-canary for failing operator serve-healthcheck-canary (previous: false): "container \"serve-healthcheck-canary\" in pod \"ingress-canary-chstv\" is waiting to start: ContainerCreating" I0216 17:48:56.967338 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"serve-healthcheck-canary\" in pod \"ingress-canary-chstv\" is waiting to start: ContainerCreating" I0216 17:48:56.967345 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for serve-healthcheck-canary container ingress-canary-x5frj pod in namespace openshift-ingress-canary (previous: false). I0216 17:48:57.166643 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for ingress-canary-x5frj pod in namespace openshift-ingress-canary for failing operator serve-healthcheck-canary (previous: false): "container \"serve-healthcheck-canary\" in pod \"ingress-canary-x5frj\" is waiting to start: ContainerCreating" I0216 17:48:57.166655 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"serve-healthcheck-canary\" in pod \"ingress-canary-x5frj\" is waiting to start: ContainerCreating" I0216 17:48:57.166664 1 tasks_processing.go:74] worker 32 stopped. I0216 17:48:57.166723 1 recorder.go:75] Recording events/openshift-dns-operator with fingerprint=17253730be8ade81feb054f07461822c8e87b76a4be624424f07e00b16a2eb59 I0216 17:48:57.166755 1 recorder.go:75] Recording events/openshift-dns with fingerprint=a314446615d881022c8db94ece6c7dd7e1cb64e5ff7c18dae30c6c587d703394 I0216 17:48:57.166799 1 recorder.go:75] Recording events/openshift-image-registry with fingerprint=241dccae7afa84e223ab412b82f281079db54e09921919b1fd02d17fd6af8456 I0216 17:48:57.166815 1 recorder.go:75] Recording events/openshift-ingress-operator with fingerprint=94ddb7e087545becfb438c12706c8dd403bd8de4aaba43de1bf94d482f941fbc I0216 17:48:57.166840 1 recorder.go:75] Recording events/openshift-ingress with fingerprint=967d7412322aa22148da1f3c84ae28b7ca5ab0239b217d66b1fa3b0f85db8150 I0216 17:48:57.166853 1 recorder.go:75] Recording events/openshift-ingress-canary with fingerprint=e055481df83f38d260af761dd1dc7818cb88478db8e2c85504a2bf182c933953 I0216 17:48:57.166859 1 gather.go:180] gatherer "clusterconfig" function "operators_pods_and_events" took 4.759953041s to process 6 records W0216 17:48:57.461374 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again. W0216 17:48:57.461394 1 gather_dvo_metrics.go:117] Unable to read metrics from endpoint "http://deployment-validation-operator-metrics.openshift-deployment-validation-operator.svc:8383": DVO metrics service was not available within the 5s timeout: context deadline exceeded I0216 17:48:57.461403 1 tasks_processing.go:74] worker 9 stopped. 
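Editor's note: every "Recording ..." line carries a 64-hex-character fingerprint, the length of a SHA-256 digest. The sketch below assumes the fingerprint is a SHA-256 of the record's serialized content (an assumption, not something the log confirms). It also shows that empty input hashes to e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, which is the fingerprint recorded for config/dvo_metrics just below after the DVO gather failed, and the same value flagged there as a duplicate of the service-ca.crt record; that suggests both records were empty at the time.

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// fingerprint returns the hex-encoded SHA-256 digest of a record's bytes.
func fingerprint(data []byte) string {
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}

func main() {
	// Empty input hashes to the well-known empty-content digest, matching the
	// config/dvo_metrics fingerprint recorded below after the gather failed:
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
	fmt.Println(fingerprint(nil))

	// Any non-empty payload yields a distinct fingerprint (payload is a made-up example).
	fmt.Println(fingerprint([]byte(`{"kind":"ClusterOperator","name":"dns"}`)))
}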
E0216 17:48:57.461411 1 gather.go:143] gatherer "clusterconfig" function "dvo_metrics" failed with the error: DVO metrics service was not available within the 5s timeout: context deadline exceeded I0216 17:48:57.461419 1 recorder.go:75] Recording config/dvo_metrics with fingerprint=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 W0216 17:48:57.461431 1 gather.go:158] issue recording gatherer "clusterconfig" function "dvo_metrics" result "config/dvo_metrics" because of the warning: warning: the record with the same fingerprint "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" was already recorded at path "config/configmaps/openshift-config/openshift-service-ca.crt/service-ca.crt", recording another one with a different path "config/dvo_metrics" I0216 17:48:57.461456 1 gather.go:180] gatherer "clusterconfig" function "dvo_metrics" took 5.054291277s to process 1 records I0216 17:49:04.959427 1 tasks_processing.go:74] worker 18 stopped. I0216 17:49:04.959541 1 recorder.go:75] Recording config/installplans with fingerprint=b6ae0e2549358513c087729c711e8e1ad6f2144adc0ffa716b1a475ed1e6ddde I0216 17:49:04.959568 1 gather.go:180] gatherer "clusterconfig" function "install_plans" took 12.550831515s to process 1 records I0216 17:49:05.618514 1 tasks_processing.go:74] worker 57 stopped. I0216 17:49:05.618712 1 recorder.go:75] Recording config/serviceaccounts with fingerprint=7a09d1db6b3fe929b096c400270e4a23def9688e400b69b757eda99e44900cb9 I0216 17:49:05.618727 1 gather.go:180] gatherer "clusterconfig" function "service_accounts" took 13.20912928s to process 1 records E0216 17:49:05.618767 1 periodic.go:252] clusterconfig failed after 13.212s with: function "machine_healthchecks" failed with an error, function "pod_network_connectivity_checks" failed with an error, function "machines" failed with an error, function "support_secret" failed with an error, function "ingress_certificates" failed with an error, function "config_maps" failed with an error, function "dvo_metrics" failed with an error I0216 17:49:05.618778 1 controllerstatus.go:89] name=periodic-clusterconfig healthy=false reason=PeriodicGatherFailed message=Source clusterconfig could not be retrieved: function "machine_healthchecks" failed with an error, function "pod_network_connectivity_checks" failed with an error, function "machines" failed with an error, function "support_secret" failed with an error, function "ingress_certificates" failed with an error, function "config_maps" failed with an error, function "dvo_metrics" failed with an error I0216 17:49:05.618784 1 periodic.go:214] Running workloads gatherer I0216 17:49:05.618795 1 tasks_processing.go:45] number of workers: 2 I0216 17:49:05.618800 1 tasks_processing.go:69] worker 1 listening for tasks. I0216 17:49:05.618803 1 tasks_processing.go:71] worker 1 working on workload_info task. I0216 17:49:05.618815 1 tasks_processing.go:69] worker 0 listening for tasks. I0216 17:49:05.618878 1 tasks_processing.go:71] worker 0 working on helmchart_info task. I0216 17:49:05.640866 1 gather_workloads_info.go:257] Loaded pods in 0s, will wait 22s for image data I0216 17:49:05.650860 1 gather_workloads_info.go:366] No image sha256:79449e16b1207223f1209d19888b879eb56a8202c53df4800e09b231392cf219 (10ms) I0216 17:49:05.653950 1 tasks_processing.go:74] worker 0 stopped. 
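Editor's note: the tasks_processing.go messages ("number of workers: N", "worker i listening for tasks.", "worker i working on ... task.", "worker i stopped.") describe a fixed-size goroutine pool draining a task queue. A generic sketch of that pattern follows; it is not the operator's implementation, and the task names are placeholders copied from the log.

package main

import (
	"fmt"
	"sync"
)

// runWorkers drains tasks with a fixed-size pool of goroutines.
func runWorkers(numWorkers int, tasks []string, handle func(string)) {
	fmt.Println("number of workers:", numWorkers)
	ch := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < numWorkers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			fmt.Printf("worker %d listening for tasks.\n", id)
			for task := range ch {
				fmt.Printf("worker %d working on %s task.\n", id, task)
				handle(task)
			}
			fmt.Printf("worker %d stopped.\n", id)
		}(i)
	}
	for _, t := range tasks {
		ch <- t
	}
	close(ch)
	wg.Wait()
}

func main() {
	// Task names mirror the gather functions named in the log.
	runWorkers(2, []string{"workload_info", "helmchart_info"}, func(task string) {
		// A real gather function would collect and record data here.
	})
}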
I0216 17:49:05.653968 1 gather.go:180] gatherer "workloads" function "helmchart_info" took 35.041837ms to process 0 records I0216 17:49:05.660984 1 gather_workloads_info.go:366] No image sha256:b34e84d56775e42b7d832d14c4f9dc302fee37cd81ba221397cd8acba2089d20 (10ms) I0216 17:49:05.670541 1 gather_workloads_info.go:366] No image sha256:0d1d37dbdb726e924b519ef27e52e9719601fab838ae75f72c8aca11e8c3b4cc (10ms) I0216 17:49:05.680040 1 gather_workloads_info.go:366] No image sha256:0f31e990f9ca9d15dcb1b25325c8265515fcc06381909349bb021103827585c6 (9ms) I0216 17:49:05.689824 1 gather_workloads_info.go:366] No image sha256:2bf8536171476b2d616cf62b4d94d2f1dae34aca6ea6bfdb65e764a8d9675891 (10ms) I0216 17:49:05.699390 1 gather_workloads_info.go:366] No image sha256:2121717e0222b9e8892a44907b461a4f62b3f1e5429a0e2eee802d48d04fff30 (10ms) I0216 17:49:05.709514 1 gather_workloads_info.go:366] No image sha256:43e426ac9df633be58006907aede6f9b6322c6cc7985cd43141ad7518847c637 (10ms) I0216 17:49:05.720639 1 gather_workloads_info.go:366] No image sha256:27e725f1250f6a17da5eba7ada315a244592b5b822d61e95722bb7e2f884b00f (11ms) I0216 17:49:05.730303 1 gather_workloads_info.go:366] No image sha256:7f55b7dbfb15fe36d83d64027eacee22fb00688ccbc03550cc2dbedfa633f288 (10ms) I0216 17:49:05.740041 1 gather_workloads_info.go:366] No image sha256:29d1672ef44c59d065737eca330075dd2f6da4ba743153973a739aa9e9d73ad3 (10ms) I0216 17:49:05.750903 1 gather_workloads_info.go:366] No image sha256:822db36f8e1353ac24785b88d1fb2150d3ef34a5e739c1f67b61079336e9798b (11ms) I0216 17:49:05.851153 1 gather_workloads_info.go:366] No image sha256:712ad2760c350db1e23b9393bdda83149452931dc7b5a5038a3bcdb4663917c0 (100ms) I0216 17:49:05.953690 1 gather_workloads_info.go:366] No image sha256:5335f64616c3a6c55a9a6dc4bc084b46a4957fb4fc250afc5343e4547ebb3598 (103ms) I0216 17:49:06.051031 1 gather_workloads_info.go:366] No image sha256:33d7e5c63340e93b5a063de538017ac693f154e3c27ee2ef8a8a53bb45583552 (97ms) I0216 17:49:06.151025 1 gather_workloads_info.go:366] No image sha256:29e41a505a942a77c0d5f954eb302c01921cb0c0d176066fe63f82f3e96e3923 (100ms) I0216 17:49:06.253543 1 gather_workloads_info.go:366] No image sha256:357821852af925e0c8a19df2f9fceec8d2e49f9d13575b86ecd3fbedce488afa (103ms) I0216 17:49:06.353814 1 gather_workloads_info.go:366] No image sha256:59f553035bc347fc7205f1c071897bc2606b98525d6b9a3aca62fc9cd7078a57 (100ms) I0216 17:49:06.451114 1 gather_workloads_info.go:366] No image sha256:deffb0293fd11f5b40609aa9e80b16b0f90a9480013b2b7f61bd350bbd9b6f07 (97ms) I0216 17:49:06.551195 1 gather_workloads_info.go:366] No image sha256:2193d7361704b0ae4bca052e9158761e06ecbac9ca3f0a9c8f0f101127e8f370 (100ms) I0216 17:49:06.656168 1 gather_workloads_info.go:366] No image sha256:f550296753e9898c67d563b7deb16ba540ca1367944c905415f35537b6b949d4 (105ms) I0216 17:49:06.753646 1 gather_workloads_info.go:366] No image sha256:91d9cb208e6d0c39a87dfe8276d162c75ff3fcd3b005b3e7b537f65c53475a42 (97ms) I0216 17:49:06.850958 1 gather_workloads_info.go:366] No image sha256:457372d9f22e1c726ea1a6fcc54ddca8335bd607d2c357bcd7b63a7017aa5c2b (97ms) I0216 17:49:06.951101 1 gather_workloads_info.go:366] No image sha256:586e9c2756f50e562a6123f47fe38dba5496b63413c3dd18e0b85d6167094f0c (100ms) I0216 17:49:07.051265 1 gather_workloads_info.go:366] No image sha256:745f2186738a57bb1b484f68431e77aa2f68a1b8dcb434b1f7a4b429eafdf091 (100ms) I0216 17:49:07.154325 1 gather_workloads_info.go:366] No image sha256:64ef34275f7ea992f5a4739cf7a724e55806bfab0c752fc0eccc2f70dfecbaf4 (103ms) I0216 
17:49:07.251242 1 gather_workloads_info.go:366] No image sha256:f82357030795138d2081ecc5172092222b0f4faea27e9a7a0474fbeae29111ad (97ms) I0216 17:49:07.351146 1 gather_workloads_info.go:366] No image sha256:185305b7da4ef5b90a90046f145e8c66bab3a16b12771d2e98bf78104d6a60f2 (100ms) I0216 17:49:07.451339 1 gather_workloads_info.go:366] No image sha256:c822bd444a7bc53b21afb9372ff0a24961b2687073f3563c127cce5803801b04 (100ms) I0216 17:49:07.553203 1 gather_workloads_info.go:366] No image sha256:9cc55a501aaad1adbefdd573e57c2f756a3a6a8723c43052995be6389edf1fa8 (102ms) I0216 17:49:07.651574 1 gather_workloads_info.go:366] No image sha256:88e6cc2192e682bb9c4ac5aec8e41254696d909c5dc337e720b9ec165a728064 (98ms) I0216 17:49:07.750680 1 gather_workloads_info.go:366] No image sha256:3958f525bae8ad011915244c9c8c1c2c750b761094046b2719fae36f6ac8903c (99ms) I0216 17:49:07.826722 1 configmapobserver.go:84] configmaps "insights-config" not found I0216 17:49:07.850997 1 gather_workloads_info.go:366] No image sha256:036e6f9a4609a7499f200032dac2294e4a2d98764464ed17453ef725f2f0264d (100ms) I0216 17:49:07.851020 1 tasks_processing.go:74] worker 1 stopped. I0216 17:49:07.851216 1 recorder.go:75] Recording config/workload_info with fingerprint=7d6aa97eaa4a73b31452287a693d3f3283d634acbefe9c962b81497ff05fa3ac I0216 17:49:07.851229 1 gather.go:180] gatherer "workloads" function "workload_info" took 2.232211234s to process 1 records I0216 17:49:07.851239 1 periodic.go:261] Periodic gather workloads completed in 2.232s I0216 17:49:07.851247 1 controllerstatus.go:80] name=periodic-workloads healthy=true reason= message= I0216 17:49:07.851251 1 periodic.go:214] Running conditional gatherer I0216 17:49:07.857355 1 requests.go:282] Making HTTP GET request at: https://console.redhat.com/api/gathering/v2/4.17.48/gathering_rules I0216 17:49:07.862643 1 conditional_gatherer.go:107] Get "https://console.redhat.com/api/gathering/v2/4.17.48/gathering_rules": dial tcp: lookup console.redhat.com on 172.30.0.10:53: read udp 10.129.0.11:41744->172.30.0.10:53: read: connection refused E0216 17:49:07.862862 1 conditional_gatherer.go:324] unable to update alerts cache: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory I0216 17:49:07.862915 1 conditional_gatherer.go:386] updating version cache for conditional gatherer I0216 17:49:07.869688 1 conditional_gatherer.go:394] cluster version is '4.17.48' E0216 17:49:07.869700 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869704 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869707 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869709 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869711 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869714 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869717 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869719 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing E0216 17:49:07.869720 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing I0216 
17:49:07.869730 1 tasks_processing.go:45] number of workers: 3 I0216 17:49:07.869741 1 tasks_processing.go:69] worker 2 listening for tasks. I0216 17:49:07.869744 1 tasks_processing.go:71] worker 2 working on conditional_gatherer_rules task. I0216 17:49:07.869752 1 tasks_processing.go:69] worker 0 listening for tasks. I0216 17:49:07.869764 1 tasks_processing.go:71] worker 0 working on remote_configuration task. I0216 17:49:07.869764 1 tasks_processing.go:69] worker 1 listening for tasks. I0216 17:49:07.869776 1 tasks_processing.go:74] worker 1 stopped. I0216 17:49:07.869776 1 tasks_processing.go:71] worker 2 working on rapid_container_logs task. I0216 17:49:07.869802 1 recorder.go:75] Recording insights-operator/conditional-gatherer-rules with fingerprint=7034af97e7e41c22e4b775abdd4b9066c8ebb19da33eb7f69f39bfd2eb5f6406 I0216 17:49:07.869812 1 gather.go:180] gatherer "conditional" function "conditional_gatherer_rules" took 551ns to process 1 records I0216 17:49:07.869834 1 recorder.go:75] Recording insights-operator/remote-configuration with fingerprint=0394430c431eec4d48bb1811a90918e95161d2282c59af26f2473613cc0959db I0216 17:49:07.869841 1 gather.go:180] gatherer "conditional" function "remote_configuration" took 711ns to process 1 records I0216 17:49:07.869845 1 tasks_processing.go:74] worker 0 stopped. I0216 17:49:07.869925 1 tasks_processing.go:74] worker 2 stopped. I0216 17:49:07.869936 1 gather.go:180] gatherer "conditional" function "rapid_container_logs" took 137.054µs to process 0 records I0216 17:49:07.869952 1 controllerstatus.go:89] name=periodic-conditional healthy=false reason=NotAvailable message=Get "https://console.redhat.com/api/gathering/v2/4.17.48/gathering_rules": dial tcp: lookup console.redhat.com on 172.30.0.10:53: read udp 10.129.0.11:41744->172.30.0.10:53: read: connection refused I0216 17:49:07.869963 1 recorder.go:75] Recording insights-operator/remote-configuration.json with fingerprint=359de9c990c741675cec72fda96b5c3682221efdb4799f5eaa6e9805bcd3b5c1 W0216 17:49:07.897904 1 gather.go:212] can't read cgroups memory usage data: open /sys/fs/cgroup/memory/memory.usage_in_bytes: no such file or directory I0216 17:49:07.897987 1 recorder.go:75] Recording insights-operator/gathers with fingerprint=f9a3e7edcf605d33c03e4a28ae97155ce4e29dd0864eae25a0ac31aaa0bff8f3 I0216 17:49:07.898066 1 diskrecorder.go:70] Writing 98 records to /var/lib/insights-operator/insights-2026-02-16-174907.tar.gz I0216 17:49:07.902865 1 diskrecorder.go:51] Wrote 98 records to disk in 4ms I0216 17:49:07.902895 1 periodic.go:283] Gathering cluster info every 2h0m0s I0216 17:49:07.902908 1 periodic.go:284] Configuration is dataReporting: interval: 2h0m0s, uploadEndpoint: https://console.redhat.com/api/ingress/v1/upload, storagePath: /var/lib/insights-operator, downloadEndpoint: https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports, conditionalGathererEndpoint: https://console.redhat.com/api/gathering/v2/%s/gathering_rules, obfuscation: [] sca: disabled: false, endpoint: https://api.openshift.com/api/accounts_mgmt/v1/certificates, interval: 8h0m0s alerting: disabled: false clusterTransfer: endpoint: https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/, interval: 12h0m0s proxy: httpProxy: , httpsProxy: , noProxy: I0216 17:49:08.028405 1 configmapobserver.go:84] configmaps "insights-config" not found I0216 17:49:19.220212 1 configmapobserver.go:84] configmaps "insights-config" not found I0216 17:50:02.080537 1 observer_polling.go:111] Observed file 
"/var/run/secrets/serving-cert/tls.crt" has been created (hash="1b59219c872328c3cd95ae8e7b185f28355526919a932b7d69df32769dbe7806") W0216 17:50:02.080567 1 builder.go:155] Restart triggered because of file /var/run/secrets/serving-cert/tls.crt was created I0216 17:50:02.080595 1 observer_polling.go:111] Observed file "/var/run/secrets/serving-cert/tls.key" has been created (hash="f58e8403185d4c32d126616df5d7c91db0c7ddd592494a898ab31b69dd99baea") I0216 17:50:02.080641 1 observer_polling.go:111] Observed file "/var/run/configmaps/service-ca-bundle/service-ca.crt" has been created (hash="d17f388d484b48a409b0c759314bdbdc5bf9f27b3e0bb33d5dcea9a76979329f") I0216 17:50:02.080686 1 base_controller.go:172] Shutting down LoggingSyncer ... I0216 17:50:02.080702 1 simple_featuregate_reader.go:177] Shutting down feature-gate-detector I0216 17:50:02.080706 1 genericapiserver.go:679] "[graceful-termination] pre-shutdown hooks completed" name="PreShutdownHooksStopped" I0216 17:50:02.080730 1 genericapiserver.go:536] "[graceful-termination] shutdown event" name="ShutdownInitiated" I0216 17:50:02.080764 1 requestheader_controller.go:183] Shutting down RequestHeaderAuthRequestController I0216 17:50:02.080774 1 configmap_cafile_content.go:223] "Shutting down controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" I0216 17:50:02.080787 1 object_count_tracker.go:151] "StorageObjectCountTracker pruner is exiting" I0216 17:50:02.080790 1 periodic.go:175] Shutting down