W0216 11:12:11.304096 1 cmd.go:245] Using insecure, self-signed certificates
I0216 11:12:11.649732 1 start.go:223] Unable to read service ca bundle: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 11:12:11.649911 1 observer_polling.go:159] Starting file observer
I0216 11:12:12.009686 1 operator.go:59] Starting insights-operator v0.0.0-master+$Format:%H$
I0216 11:12:12.009863 1 legacy_config.go:327] Current config: {"report":false,"storagePath":"/var/lib/insights-operator","interval":"2h","endpoint":"https://console.redhat.com/api/ingress/v1/upload","conditionalGathererEndpoint":"https://console.redhat.com/api/gathering/v2/%s/gathering_rules","pull_report":{"endpoint":"https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports","delay":"60s","timeout":"3000s","min_retry":"30s"},"impersonate":"system:serviceaccount:openshift-insights:gather","enableGlobalObfuscation":false,"ocm":{"scaEndpoint":"https://api.openshift.com/api/accounts_mgmt/v1/certificates","scaInterval":"8h","scaDisabled":false,"clusterTransferEndpoint":"https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/","clusterTransferInterval":"12h"},"disableInsightsAlerts":false,"processingStatusEndpoint":"https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/status","reportEndpointTechPreview":"https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/report"}
I0216 11:12:12.010161 1 simple_featuregate_reader.go:171] Starting feature-gate-detector
I0216 11:12:12.010169 1 secure_serving.go:57] Forcing use of http/1.1 only
W0216 11:12:12.010187 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.
W0216 11:12:12.010192 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.
W0216 11:12:12.010197 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.
W0216 11:12:12.010199 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.
W0216 11:12:12.010201 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.
W0216 11:12:12.010203 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.
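The "Current config" entry above embeds the operator's effective configuration as a single JSON object. Pretty-printing it makes the endpoints and intervals easier to read; a minimal sketch over a saved copy of this log (the file name is illustrative):

import json

# Locate the JSON object that follows "Current config: " and pretty-print it.
with open("insights-operator.log") as f:
    log = f.read()

start = log.index("Current config: ") + len("Current config: ")
config, _ = json.JSONDecoder().raw_decode(log, start)
print(json.dumps(config, indent=2, sort_keys=True))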
I0216 11:12:12.016542 1 event.go:364] Event(v1.ObjectReference{Kind:"Deployment", Namespace:"openshift-insights", Name:"insights-operator", UID:"de721490-b328-4431-a346-14d6060a1ff1", APIVersion:"apps/v1", ResourceVersion:"", FieldPath:""}): type: 'Normal' reason: 'FeatureGatesInitialized' FeatureGates updated to featuregates.Features{Enabled:[]v1.FeatureGateName{"AWSEFSDriverVolumeMetrics", "AdminNetworkPolicy", "AlibabaPlatform", "AzureWorkloadIdentity", "BareMetalLoadBalancer", "BuildCSIVolumes", "ChunkSizeMiB", "CloudDualStackNodeIPs", "ClusterAPIInstallAWS", "ClusterAPIInstallAzure", "ClusterAPIInstallGCP", "ClusterAPIInstallNutanix", "ClusterAPIInstallOpenStack", "ClusterAPIInstallPowerVS", "ClusterAPIInstallVSphere", "DisableKubeletCloudCredentialProviders", "ExternalCloudProvider", "ExternalCloudProviderAzure", "ExternalCloudProviderExternal", "ExternalCloudProviderGCP", "ExternalOIDC", "GCPLabelsTags", "HardwareSpeed", "IngressControllerLBSubnetsAWS", "KMSv1", "ManagedBootImages", "MetricsServer", "MultiArchInstallAWS", "MultiArchInstallGCP", "NetworkDiagnosticsConfig", "NetworkLiveMigration", "NodeDisruptionPolicy", "PrivateHostedZoneAWS", "SetEIPForNLBIngressController", "StreamingCollectionEncodingToJSON", "StreamingCollectionEncodingToProtobuf", "VSphereControlPlaneMachineSet", "VSphereDriverConfiguration", "VSphereStaticIPs", "ValidatingAdmissionPolicy"}, Disabled:[]v1.FeatureGateName{"AdditionalRoutingCapabilities", "AutomatedEtcdBackup", "BootcNodeManagement", "CSIDriverSharedResource", "ClusterAPIInstall", "ClusterAPIInstallIBMCloud", "ClusterMonitoringConfig", "DNSNameResolver", "DynamicResourceAllocation", "EtcdBackendQuota", "EventedPLEG", "Example", "GCPClusterHostedDNS", "GatewayAPI", "InsightsConfig", "InsightsConfigAPI", "InsightsOnDemandDataGather", "InstallAlternateInfrastructureAWS", "MachineAPIMigration", "MachineAPIOperatorDisableMachineHealthCheckController", "MachineAPIProviderOpenStack", "MachineConfigNodes", "ManagedBootImagesAWS", "MaxUnavailableStatefulSet", "MetricsCollectionProfiles", "MixedCPUsAllocation", "MultiArchInstallAzure", "NetworkSegmentation", "NewOLM", "NodeSwap", "OVNObservability", "OnClusterBuild", "OpenShiftPodSecurityAdmission", "PersistentIPsForVirtualization", "PinnedImages", "PlatformOperators", "ProcMountType", "RouteAdvertisements", "RouteExternalCertificate", "ServiceAccountTokenNodeBinding", "SignatureStores", "SigstoreImageVerification", "TranslateStreamCloseWebsocketRequests", "UpgradeStatus", "UserNamespacesSupport", "VSphereMultiVCenters", "VolumeGroupSnapshot"}} I0216 11:12:12.016591 1 operator.go:124] FeatureGates initialized: knownFeatureGates=[AWSEFSDriverVolumeMetrics AdditionalRoutingCapabilities AdminNetworkPolicy AlibabaPlatform AutomatedEtcdBackup AzureWorkloadIdentity BareMetalLoadBalancer BootcNodeManagement BuildCSIVolumes CSIDriverSharedResource ChunkSizeMiB CloudDualStackNodeIPs ClusterAPIInstall ClusterAPIInstallAWS ClusterAPIInstallAzure ClusterAPIInstallGCP ClusterAPIInstallIBMCloud ClusterAPIInstallNutanix ClusterAPIInstallOpenStack ClusterAPIInstallPowerVS ClusterAPIInstallVSphere ClusterMonitoringConfig DNSNameResolver DisableKubeletCloudCredentialProviders DynamicResourceAllocation EtcdBackendQuota EventedPLEG Example ExternalCloudProvider ExternalCloudProviderAzure ExternalCloudProviderExternal ExternalCloudProviderGCP ExternalOIDC GCPClusterHostedDNS GCPLabelsTags GatewayAPI HardwareSpeed IngressControllerLBSubnetsAWS InsightsConfig InsightsConfigAPI InsightsOnDemandDataGather 
InstallAlternateInfrastructureAWS KMSv1 MachineAPIMigration MachineAPIOperatorDisableMachineHealthCheckController MachineAPIProviderOpenStack MachineConfigNodes ManagedBootImages ManagedBootImagesAWS MaxUnavailableStatefulSet MetricsCollectionProfiles MetricsServer MixedCPUsAllocation MultiArchInstallAWS MultiArchInstallAzure MultiArchInstallGCP NetworkDiagnosticsConfig NetworkLiveMigration NetworkSegmentation NewOLM NodeDisruptionPolicy NodeSwap OVNObservability OnClusterBuild OpenShiftPodSecurityAdmission PersistentIPsForVirtualization PinnedImages PlatformOperators PrivateHostedZoneAWS ProcMountType RouteAdvertisements RouteExternalCertificate ServiceAccountTokenNodeBinding SetEIPForNLBIngressController SignatureStores SigstoreImageVerification StreamingCollectionEncodingToJSON StreamingCollectionEncodingToProtobuf TranslateStreamCloseWebsocketRequests UpgradeStatus UserNamespacesSupport VSphereControlPlaneMachineSet VSphereDriverConfiguration VSphereMultiVCenters VSphereStaticIPs ValidatingAdmissionPolicy VolumeGroupSnapshot] I0216 11:12:12.018348 1 requestheader_controller.go:169] Starting RequestHeaderAuthRequestController I0216 11:12:12.018359 1 shared_informer.go:311] Waiting for caches to sync for RequestHeaderAuthRequestController I0216 11:12:12.018356 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" I0216 11:12:12.018364 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file" I0216 11:12:12.018367 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file I0216 11:12:12.018371 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file I0216 11:12:12.018609 1 dynamic_serving_content.go:132] "Starting controller" name="serving-cert::/tmp/serving-cert-3587413873/tls.crt::/tmp/serving-cert-3587413873/tls.key" I0216 11:12:12.018726 1 secure_serving.go:213] Serving securely on [::]:8443 I0216 11:12:12.018747 1 tlsconfig.go:240] "Starting DynamicServingCertificateController" W0216 11:12:12.027555 1 configmapobserver.go:64] Cannot get the configuration config map: configmaps "insights-config" not found. Default configuration is used. 
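The last warning above is informational: the optional insights-config ConfigMap (in the operator's namespace, openshift-insights) is absent, so the operator falls back to its built-in defaults. A quick existence check with the kubernetes Python client, assuming a kubeconfig with read access to that namespace:

from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()  # or load_incluster_config() when run inside a pod
core = client.CoreV1Api()
try:
    cm = core.read_namespaced_config_map("insights-config", "openshift-insights")
    print("insights-config present, keys:", sorted((cm.data or {}).keys()))
except ApiException as exc:
    if exc.status == 404:
        print("insights-config not present; the operator uses its defaults")
    else:
        raise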
I0216 11:12:12.027601 1 secretconfigobserver.go:216] Legacy configuration set: enabled=false endpoint=https://console.redhat.com/api/ingress/v1/upload conditional_gatherer_endpoint=https://console.redhat.com/api/gathering/v2/%s/gathering_rules interval=2h0m0s token=false reportEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports initialPollingDelay=1m0s minRetryTime=30s pollingTimeout=50m0s processingStatusEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/status I0216 11:12:12.027718 1 base_controller.go:67] Waiting for caches to sync for ConfigController I0216 11:12:12.034616 1 secretconfigobserver.go:249] Found cloud.openshift.com token I0216 11:12:12.034632 1 secretconfigobserver.go:204] Legacy configuration updated: enabled=true endpoint=https://console.redhat.com/api/ingress/v1/upload conditional_gatherer_endpoint=https://console.redhat.com/api/gathering/v2/%s/gathering_rules interval=2h0m0s token=true reportEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports initialPollingDelay=1m0s minRetryTime=30s pollingTimeout=50m0s processingStatusEndpoint=https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/request/%s/status I0216 11:12:12.040446 1 secretconfigobserver.go:119] support secret does not exist I0216 11:12:12.046359 1 secretconfigobserver.go:249] Found cloud.openshift.com token I0216 11:12:12.053710 1 secretconfigobserver.go:119] support secret does not exist I0216 11:12:12.054971 1 recorder.go:161] Pruning old reports every 5h47m18s, max age is 288h0m0s I0216 11:12:12.062542 1 periodic.go:214] Running clusterconfig gatherer I0216 11:12:12.062542 1 controllerstatus.go:80] name=insightsuploader healthy=true reason= message= I0216 11:12:12.062564 1 insightsuploader.go:86] Reporting status periodically to https://console.redhat.com/api/ingress/v1/upload every 2h0m0s, starting in 1m30s I0216 11:12:12.062595 1 tasks_processing.go:45] number of workers: 64 I0216 11:12:12.062602 1 controllerstatus.go:80] name=insightsreport healthy=true reason= message= I0216 11:12:12.062612 1 tasks_processing.go:69] worker 0 listening for tasks. I0216 11:12:12.062616 1 insightsreport.go:296] Starting report retriever I0216 11:12:12.062619 1 tasks_processing.go:69] worker 11 listening for tasks. I0216 11:12:12.062621 1 insightsreport.go:298] Insights analysis reports will be downloaded from the https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports endpoint with a delay of 1m0s I0216 11:12:12.062625 1 tasks_processing.go:69] worker 1 listening for tasks. I0216 11:12:12.062629 1 tasks_processing.go:69] worker 2 listening for tasks. I0216 11:12:12.062631 1 tasks_processing.go:69] worker 14 listening for tasks. I0216 11:12:12.062635 1 tasks_processing.go:69] worker 3 listening for tasks. I0216 11:12:12.062637 1 tasks_processing.go:69] worker 12 listening for tasks. I0216 11:12:12.062639 1 tasks_processing.go:69] worker 4 listening for tasks. I0216 11:12:12.062642 1 tasks_processing.go:69] worker 13 listening for tasks. I0216 11:12:12.062644 1 tasks_processing.go:69] worker 5 listening for tasks. I0216 11:12:12.062641 1 tasks_processing.go:69] worker 18 listening for tasks. I0216 11:12:12.062651 1 tasks_processing.go:71] worker 5 working on sap_config task. I0216 11:12:12.062653 1 tasks_processing.go:69] worker 7 listening for tasks. I0216 11:12:12.062655 1 tasks_processing.go:69] worker 19 listening for tasks. 
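Here the secret config observer finds the cloud.openshift.com token (so reporting flips to enabled=true) and notes that the optional support secret is absent. On OpenShift that token normally comes from the global pull secret, pull-secret in openshift-config; a sketch to confirm the auth entry exists, assuming the kubernetes Python client and read access to that secret:

import base64
import json

from kubernetes import client, config

config.load_kube_config()
secret = client.CoreV1Api().read_namespaced_secret("pull-secret", "openshift-config")
dockercfg = json.loads(base64.b64decode(secret.data[".dockerconfigjson"]))
# The uploader needs an auth entry for cloud.openshift.com.
print("cloud.openshift.com" in dockercfg.get("auths", {}))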
I0216 11:12:12.062658 1 tasks_processing.go:69] worker 8 listening for tasks. I0216 11:12:12.062658 1 tasks_processing.go:69] worker 17 listening for tasks. I0216 11:12:12.062661 1 tasks_processing.go:69] worker 20 listening for tasks. I0216 11:12:12.062663 1 tasks_processing.go:69] worker 9 listening for tasks. I0216 11:12:12.062657 1 tasks_processing.go:69] worker 16 listening for tasks. I0216 11:12:12.062671 1 tasks_processing.go:69] worker 22 listening for tasks. I0216 11:12:12.062672 1 tasks_processing.go:69] worker 47 listening for tasks. I0216 11:12:12.062674 1 tasks_processing.go:71] worker 16 working on oauths task. I0216 11:12:12.062673 1 tasks_processing.go:71] worker 20 working on openstack_controlplanes task. I0216 11:12:12.062674 1 tasks_processing.go:69] worker 10 listening for tasks. I0216 11:12:12.062678 1 tasks_processing.go:69] worker 35 listening for tasks. I0216 11:12:12.062682 1 tasks_processing.go:69] worker 36 listening for tasks. I0216 11:12:12.062665 1 tasks_processing.go:69] worker 15 listening for tasks. I0216 11:12:12.062688 1 tasks_processing.go:69] worker 51 listening for tasks. I0216 11:12:12.062689 1 tasks_processing.go:71] worker 18 working on aggregated_monitoring_cr_names task. I0216 11:12:12.062690 1 tasks_processing.go:69] worker 24 listening for tasks. I0216 11:12:12.062694 1 tasks_processing.go:69] worker 52 listening for tasks. I0216 11:12:12.062694 1 tasks_processing.go:69] worker 37 listening for tasks. I0216 11:12:12.062691 1 tasks_processing.go:71] worker 19 working on openstack_dataplanedeployments task. I0216 11:12:12.062701 1 tasks_processing.go:69] worker 53 listening for tasks. I0216 11:12:12.062700 1 tasks_processing.go:69] worker 38 listening for tasks. I0216 11:12:12.062679 1 tasks_processing.go:69] worker 49 listening for tasks. I0216 11:12:12.062684 1 tasks_processing.go:69] worker 50 listening for tasks. I0216 11:12:12.062649 1 tasks_processing.go:71] worker 13 working on pod_network_connectivity_checks task. I0216 11:12:12.062706 1 tasks_processing.go:71] worker 8 working on image_pruners task. I0216 11:12:12.062709 1 tasks_processing.go:71] worker 17 working on pdbs task. I0216 11:12:12.062714 1 tasks_processing.go:69] worker 40 listening for tasks. I0216 11:12:12.062666 1 tasks_processing.go:69] worker 21 listening for tasks. I0216 11:12:12.062721 1 tasks_processing.go:69] worker 41 listening for tasks. I0216 11:12:12.062698 1 tasks_processing.go:69] worker 25 listening for tasks. I0216 11:12:12.062728 1 tasks_processing.go:69] worker 30 listening for tasks. I0216 11:12:12.062703 1 tasks_processing.go:69] worker 26 listening for tasks. I0216 11:12:12.062716 1 tasks_processing.go:69] worker 28 listening for tasks. I0216 11:12:12.062648 1 tasks_processing.go:69] worker 6 listening for tasks. I0216 11:12:12.062736 1 tasks_processing.go:69] worker 43 listening for tasks. I0216 11:12:12.062676 1 tasks_processing.go:69] worker 48 listening for tasks. I0216 11:12:12.062736 1 tasks_processing.go:69] worker 31 listening for tasks. I0216 11:12:12.062742 1 tasks_processing.go:69] worker 33 listening for tasks. I0216 11:12:12.062744 1 tasks_processing.go:69] worker 46 listening for tasks. I0216 11:12:12.062745 1 tasks_processing.go:71] worker 7 working on qemu_kubevirt_launcher_logs task. I0216 11:12:12.062729 1 tasks_processing.go:69] worker 42 listening for tasks. I0216 11:12:12.062684 1 tasks_processing.go:69] worker 23 listening for tasks. I0216 11:12:12.062710 1 tasks_processing.go:69] worker 27 listening for tasks. 
I0216 11:12:12.062748 1 tasks_processing.go:69] worker 32 listening for tasks. I0216 11:12:12.062755 1 tasks_processing.go:69] worker 54 listening for tasks. I0216 11:12:12.062757 1 tasks_processing.go:71] worker 14 working on machines task. I0216 11:12:12.062752 1 tasks_processing.go:69] worker 45 listening for tasks. I0216 11:12:12.062721 1 tasks_processing.go:69] worker 29 listening for tasks. I0216 11:12:12.062765 1 tasks_processing.go:69] worker 55 listening for tasks. I0216 11:12:12.062764 1 tasks_processing.go:69] worker 44 listening for tasks. I0216 11:12:12.062767 1 tasks_processing.go:71] worker 11 working on operators_pods_and_events task. I0216 11:12:12.062769 1 tasks_processing.go:71] worker 1 working on machine_healthchecks task. I0216 11:12:12.062774 1 tasks_processing.go:69] worker 56 listening for tasks. I0216 11:12:12.062775 1 tasks_processing.go:71] worker 12 working on ingress task. I0216 11:12:12.062774 1 tasks_processing.go:71] worker 44 working on machine_configs task. I0216 11:12:12.062780 1 tasks_processing.go:71] worker 10 working on mutating_webhook_configurations task. I0216 11:12:12.062785 1 tasks_processing.go:69] worker 57 listening for tasks. I0216 11:12:12.062784 1 tasks_processing.go:71] worker 4 working on active_alerts task. I0216 11:12:12.062790 1 tasks_processing.go:71] worker 57 working on metrics task. I0216 11:12:12.062796 1 tasks_processing.go:71] worker 3 working on nodenetworkconfigurationpolicies task. I0216 11:12:12.062843 1 tasks_processing.go:71] worker 15 working on support_secret task. I0216 11:12:12.062880 1 tasks_processing.go:71] worker 27 working on openshift_apiserver_operator_logs task. I0216 11:12:12.062778 1 tasks_processing.go:71] worker 56 working on monitoring_persistent_volumes task. I0216 11:12:12.062776 1 tasks_processing.go:71] worker 47 working on config_maps task. I0216 11:12:12.062668 1 tasks_processing.go:71] worker 9 working on openstack_dataplanenodesets task. I0216 11:12:12.062674 1 tasks_processing.go:69] worker 34 listening for tasks. I0216 11:12:12.062998 1 tasks_processing.go:71] worker 34 working on container_runtime_configs task. I0216 11:12:12.062765 1 tasks_processing.go:71] worker 0 working on container_images task. I0216 11:12:12.062708 1 tasks_processing.go:69] worker 39 listening for tasks. I0216 11:12:12.063414 1 tasks_processing.go:71] worker 39 working on nodes task. I0216 11:12:12.062767 1 tasks_processing.go:71] worker 45 working on storage_cluster task. I0216 11:12:12.062845 1 tasks_processing.go:71] worker 46 working on proxies task. W0216 11:12:12.062811 1 gather_most_recent_metrics.go:64] Unable to load metrics client, no metrics will be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory I0216 11:12:12.063602 1 tasks_processing.go:71] worker 57 working on storage_classes task. I0216 11:12:12.062753 1 tasks_processing.go:71] worker 2 working on version task. I0216 11:12:12.063625 1 gather.go:180] gatherer "clusterconfig" function "metrics" took 802.863µs to process 0 records I0216 11:12:12.062760 1 tasks_processing.go:69] worker 63 listening for tasks. I0216 11:12:12.062768 1 tasks_processing.go:71] worker 29 working on networks task. I0216 11:12:12.063737 1 tasks_processing.go:71] worker 63 working on service_accounts task. I0216 11:12:12.062771 1 tasks_processing.go:71] worker 22 working on certificate_signing_requests task. 
W0216 11:12:12.062813 1 gather_active_alerts.go:54] Unable to load alerts client, no alerts will be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory I0216 11:12:12.062824 1 tasks_processing.go:69] worker 61 listening for tasks. I0216 11:12:12.062828 1 tasks_processing.go:69] worker 58 listening for tasks. I0216 11:12:12.062831 1 tasks_processing.go:71] worker 25 working on cluster_apiserver task. I0216 11:12:12.063904 1 tasks_processing.go:71] worker 61 working on ceph_cluster task. I0216 11:12:12.062832 1 tasks_processing.go:71] worker 31 working on scheduler_logs task. I0216 11:12:12.063980 1 tasks_processing.go:71] worker 58 working on operators task. I0216 11:12:12.062836 1 tasks_processing.go:71] worker 35 working on ingress_certificates task. I0216 11:12:12.062836 1 tasks_processing.go:71] worker 23 working on jaegers task. I0216 11:12:12.062831 1 tasks_processing.go:69] worker 60 listening for tasks. I0216 11:12:12.064094 1 tasks_processing.go:71] worker 60 working on kube_controller_manager_logs task. I0216 11:12:12.062836 1 tasks_processing.go:71] worker 30 working on image task. I0216 11:12:12.062839 1 tasks_processing.go:71] worker 36 working on node_logs task. I0216 11:12:12.062841 1 tasks_processing.go:71] worker 33 working on nodenetworkstates task. I0216 11:12:12.062842 1 tasks_processing.go:71] worker 26 working on tsdb_status task. W0216 11:12:12.064525 1 gather_prometheus_tsdb_status.go:38] Unable to load metrics client, tsdb status cannot be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory I0216 11:12:12.062846 1 tasks_processing.go:71] worker 28 working on silenced_alerts task. W0216 11:12:12.064543 1 gather_silenced_alerts.go:38] Unable to load alerts client, no alerts will be collected: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory I0216 11:12:12.062846 1 tasks_processing.go:69] worker 59 listening for tasks. I0216 11:12:12.064554 1 tasks_processing.go:71] worker 59 working on sap_datahubs task. I0216 11:12:12.062750 1 tasks_processing.go:69] worker 62 listening for tasks. I0216 11:12:12.062849 1 tasks_processing.go:71] worker 42 working on openshift_authentication_logs task. I0216 11:12:12.064669 1 tasks_processing.go:71] worker 62 working on install_plans task. I0216 11:12:12.062850 1 tasks_processing.go:71] worker 6 working on number_of_pods_and_netnamespaces_with_sdn_annotations task. I0216 11:12:12.062855 1 tasks_processing.go:71] worker 38 working on olm_operators task. I0216 11:12:12.062855 1 tasks_processing.go:71] worker 43 working on authentication task. I0216 11:12:12.062855 1 tasks_processing.go:71] worker 48 working on clusterroles task. I0216 11:12:12.062860 1 tasks_processing.go:71] worker 51 working on lokistack task. I0216 11:12:12.062862 1 tasks_processing.go:71] worker 40 working on validating_webhook_configurations task. I0216 11:12:12.062861 1 tasks_processing.go:71] worker 55 working on infrastructures task. I0216 11:12:12.062864 1 tasks_processing.go:71] worker 24 working on openstack_version task. I0216 11:12:12.062866 1 tasks_processing.go:71] worker 49 working on cost_management_metrics_configs task. I0216 11:12:12.062869 1 tasks_processing.go:71] worker 52 working on feature_gates task. I0216 11:12:12.062868 1 tasks_processing.go:71] worker 50 working on crds task. I0216 11:12:12.062872 1 tasks_processing.go:71] worker 37 working on machine_sets task. 
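Several gatherers in this block (metrics, active_alerts, tsdb_status, silenced_alerts) skip collection because the service CA bundle at /var/run/configmaps/service-ca-bundle/service-ca.crt is not available yet. When triaging a run like this it helps to pull out only the warning and error entries; a small filter over a saved copy of the log (Python 3.7+, file name illustrative):

import re

# klog entries begin with a severity letter (I/W/E/F), MMDD and a timestamp.
entry_start = re.compile(r"(?=[IWEF]\d{4} \d{2}:\d{2}:\d{2}\.\d+ )")

with open("insights-operator.log") as f:
    text = f.read()

for entry in entry_start.split(text):
    if entry.startswith(("W", "E", "F")):
        print(entry.strip())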
I0216 11:12:12.062873 1 tasks_processing.go:71] worker 21 working on machine_config_pools task. I0216 11:12:12.062874 1 tasks_processing.go:71] worker 32 working on openshift_logging task. I0216 11:12:12.062873 1 tasks_processing.go:71] worker 53 working on openshift_machine_api_events task. I0216 11:12:12.062878 1 tasks_processing.go:71] worker 41 working on schedulers task. I0216 11:12:12.062879 1 tasks_processing.go:71] worker 54 working on dvo_metrics task. I0216 11:12:12.063849 1 tasks_processing.go:71] worker 4 working on machine_autoscalers task. I0216 11:12:12.063864 1 gather.go:180] gatherer "clusterconfig" function "active_alerts" took 1.052649ms to process 0 records I0216 11:12:12.065464 1 gather.go:180] gatherer "clusterconfig" function "tsdb_status" took 20.779µs to process 0 records I0216 11:12:12.065476 1 gather.go:180] gatherer "clusterconfig" function "silenced_alerts" took 16.147µs to process 0 records I0216 11:12:12.065478 1 tasks_processing.go:71] worker 26 working on image_registries task. I0216 11:12:12.065779 1 tasks_processing.go:71] worker 28 working on sap_pods task. I0216 11:12:12.070864 1 tasks_processing.go:71] worker 5 working on overlapping_namespace_uids task. I0216 11:12:12.070880 1 gather.go:180] gatherer "clusterconfig" function "sap_config" took 8.202298ms to process 0 records E0216 11:12:12.070890 1 gather.go:143] gatherer "clusterconfig" function "machines" failed with the error: machines.machine.openshift.io is forbidden: User "system:serviceaccount:openshift-insights:gather" cannot list resource "machines" in API group "machine.openshift.io" at the cluster scope I0216 11:12:12.070896 1 gather.go:180] gatherer "clusterconfig" function "machines" took 8.096531ms to process 0 records I0216 11:12:12.070963 1 tasks_processing.go:71] worker 14 working on sap_license_management_logs task. I0216 11:12:12.071008 1 tasks_processing.go:74] worker 1 stopped. E0216 11:12:12.071022 1 gather.go:143] gatherer "clusterconfig" function "machine_healthchecks" failed with the error: machinehealthchecks.machine.openshift.io is forbidden: User "system:serviceaccount:openshift-insights:gather" cannot list resource "machinehealthchecks" in API group "machine.openshift.io" at the cluster scope I0216 11:12:12.071034 1 gather.go:180] gatherer "clusterconfig" function "machine_healthchecks" took 8.224069ms to process 0 records I0216 11:12:12.075645 1 tasks_processing.go:74] worker 13 stopped. E0216 11:12:12.075656 1 gather.go:143] gatherer "clusterconfig" function "pod_network_connectivity_checks" failed with the error: the server could not find the requested resource (get podnetworkconnectivitychecks.controlplane.operator.openshift.io) I0216 11:12:12.075666 1 gather.go:180] gatherer "clusterconfig" function "pod_network_connectivity_checks" took 12.930352ms to process 0 records I0216 11:12:12.075788 1 tasks_processing.go:74] worker 3 stopped. I0216 11:12:12.075800 1 gather.go:180] gatherer "clusterconfig" function "nodenetworkconfigurationpolicies" took 12.93674ms to process 0 records I0216 11:12:12.079633 1 tasks_processing.go:74] worker 44 stopped. I0216 11:12:12.079645 1 gather.go:180] gatherer "clusterconfig" function "machine_configs" took 16.844755ms to process 0 records I0216 11:12:12.079657 1 gather.go:180] gatherer "clusterconfig" function "openstack_dataplanedeployments" took 16.938501ms to process 0 records I0216 11:12:12.079661 1 tasks_processing.go:74] worker 19 stopped. I0216 11:12:12.080227 1 tasks_processing.go:74] worker 15 stopped. 
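The two "forbidden" errors above are RBAC denials: the gather service account is not allowed to list machines or machinehealthchecks in the machine.openshift.io API group. A SubjectAccessReview reproduces the same check without touching the operator; a sketch with the kubernetes Python client:

from kubernetes import client, config

config.load_kube_config()
authz = client.AuthorizationV1Api()

review = client.V1SubjectAccessReview(
    spec=client.V1SubjectAccessReviewSpec(
        user="system:serviceaccount:openshift-insights:gather",
        resource_attributes=client.V1ResourceAttributes(
            group="machine.openshift.io",
            resource="machines",
            verb="list",
        ),
    )
)
result = authz.create_subject_access_review(review)
print("allowed:", result.status.allowed, "reason:", result.status.reason)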
E0216 11:12:12.080240 1 gather.go:143] gatherer "clusterconfig" function "support_secret" failed with the error: secrets "support" not found I0216 11:12:12.080247 1 gather.go:180] gatherer "clusterconfig" function "support_secret" took 17.33735ms to process 0 records I0216 11:12:12.080388 1 tasks_processing.go:74] worker 8 stopped. I0216 11:12:12.080605 1 gather_logs.go:145] no pods in openshift-apiserver-operator namespace were found I0216 11:12:12.080690 1 recorder.go:75] Recording config/clusteroperator/imageregistry.operator.openshift.io/imagepruner/cluster with fingerprint=b72a3856e9866977a327a04fb9c445fe50727b7ffbfdcc18dc0e409fc8bf1edb I0216 11:12:12.080704 1 gather.go:180] gatherer "clusterconfig" function "image_pruners" took 17.669758ms to process 1 records I0216 11:12:12.080715 1 gather.go:180] gatherer "clusterconfig" function "openshift_apiserver_operator_logs" took 17.71932ms to process 0 records I0216 11:12:12.080722 1 tasks_processing.go:74] worker 27 stopped. I0216 11:12:12.082241 1 tasks_processing.go:74] worker 23 stopped. I0216 11:12:12.082253 1 gather.go:180] gatherer "clusterconfig" function "jaegers" took 18.16667ms to process 0 records I0216 11:12:12.082499 1 tasks_processing.go:74] worker 61 stopped. I0216 11:12:12.082511 1 gather.go:180] gatherer "clusterconfig" function "ceph_cluster" took 18.581475ms to process 0 records I0216 11:12:12.082723 1 tasks_processing.go:74] worker 45 stopped. I0216 11:12:12.082731 1 gather.go:180] gatherer "clusterconfig" function "storage_cluster" took 19.257882ms to process 0 records I0216 11:12:12.082737 1 gather.go:180] gatherer "clusterconfig" function "container_runtime_configs" took 19.717032ms to process 0 records I0216 11:12:12.082741 1 tasks_processing.go:74] worker 34 stopped. I0216 11:12:12.083994 1 tasks_processing.go:74] worker 59 stopped. I0216 11:12:12.084004 1 gather.go:180] gatherer "clusterconfig" function "sap_datahubs" took 19.431326ms to process 0 records I0216 11:12:12.084014 1 gather.go:180] gatherer "clusterconfig" function "openstack_controlplanes" took 21.321368ms to process 0 records I0216 11:12:12.084018 1 tasks_processing.go:74] worker 20 stopped. I0216 11:12:12.087820 1 tasks_processing.go:74] worker 38 stopped. I0216 11:12:12.087831 1 gather.go:180] gatherer "clusterconfig" function "olm_operators" took 22.986515ms to process 0 records I0216 11:12:12.088012 1 tasks_processing.go:74] worker 43 stopped. I0216 11:12:12.088207 1 recorder.go:75] Recording config/authentication with fingerprint=47a5c099bd26e0acf861f8099a62425097e7b935fb283728ba7f16f1e8125954 I0216 11:12:12.088218 1 gather.go:180] gatherer "clusterconfig" function "authentication" took 23.155902ms to process 1 records I0216 11:12:12.088295 1 tasks_processing.go:74] worker 10 stopped. 
I0216 11:12:12.088328 1 recorder.go:75] Recording config/mutatingwebhookconfigurations/aws-pod-identity with fingerprint=17c921ed3aae8c009c1573e8a0cfa04f9d0684928d32b19592d6119c1ceaeb44 I0216 11:12:12.088352 1 recorder.go:75] Recording config/mutatingwebhookconfigurations/sre-podimagespec-mutation with fingerprint=c2e1cc7b3bf63b77118d147a017fdeda9339e14a302a2361381460ad6f80168c I0216 11:12:12.088370 1 recorder.go:75] Recording config/mutatingwebhookconfigurations/sre-service-mutation with fingerprint=88a10d996fa7d9babce8ab5732e1d6f5193802fc0b7cc2dd76c03afe778bddd1 I0216 11:12:12.088376 1 gather.go:180] gatherer "clusterconfig" function "mutating_webhook_configurations" took 25.269731ms to process 3 records I0216 11:12:12.089206 1 gather_logs.go:145] no pods in openshift-kube-controller-manager namespace were found I0216 11:12:12.089219 1 tasks_processing.go:74] worker 60 stopped. I0216 11:12:12.089224 1 gather.go:180] gatherer "clusterconfig" function "kube_controller_manager_logs" took 25.113016ms to process 0 records I0216 11:12:12.089315 1 tasks_processing.go:74] worker 46 stopped. I0216 11:12:12.089373 1 recorder.go:75] Recording config/proxy with fingerprint=193e3802cc990c5260bfdd022977922cbac04d573fba74eaa511f9f5c82f7bd1 I0216 11:12:12.089386 1 gather.go:180] gatherer "clusterconfig" function "proxies" took 25.763272ms to process 1 records I0216 11:12:12.089462 1 tasks_processing.go:74] worker 29 stopped. I0216 11:12:12.089469 1 recorder.go:75] Recording config/network with fingerprint=d572ec0ce8669f302c28bfada05ea6fb36eeecb20713c13e420c1cc63429d305 I0216 11:12:12.089475 1 gather.go:180] gatherer "clusterconfig" function "networks" took 25.681637ms to process 1 records I0216 11:12:12.089698 1 tasks_processing.go:74] worker 30 stopped. I0216 11:12:12.089768 1 recorder.go:75] Recording config/image with fingerprint=bb4ad57f335633ac0dbbcf25d909c2f3216dd170a33374902f2cf49d1807db3c I0216 11:12:12.089779 1 gather.go:180] gatherer "clusterconfig" function "image" took 25.494798ms to process 1 records I0216 11:12:12.107604 1 tasks_processing.go:74] worker 51 stopped. I0216 11:12:12.107653 1 gather.go:180] gatherer "clusterconfig" function "lokistack" took 42.487254ms to process 0 records I0216 11:12:12.107667 1 gather.go:180] gatherer "clusterconfig" function "nodenetworkstates" took 43.152184ms to process 0 records I0216 11:12:12.107674 1 gather.go:180] gatherer "clusterconfig" function "openstack_version" took 42.324546ms to process 0 records I0216 11:12:12.107675 1 tasks_processing.go:74] worker 33 stopped. I0216 11:12:12.107679 1 gather.go:180] gatherer "clusterconfig" function "machine_autoscalers" took 42.186406ms to process 0 records I0216 11:12:12.107682 1 tasks_processing.go:74] worker 24 stopped. I0216 11:12:12.107686 1 tasks_processing.go:74] worker 49 stopped. I0216 11:12:12.107684 1 gather.go:180] gatherer "clusterconfig" function "cost_management_metrics_configs" took 42.273727ms to process 0 records I0216 11:12:12.107692 1 tasks_processing.go:74] worker 4 stopped. I0216 11:12:12.107697 1 gather.go:180] gatherer "clusterconfig" function "machine_config_pools" took 42.217783ms to process 0 records I0216 11:12:12.107703 1 gather.go:180] gatherer "clusterconfig" function "openshift_logging" took 42.21916ms to process 0 records I0216 11:12:12.107736 1 tasks_processing.go:74] worker 21 stopped. I0216 11:12:12.107743 1 tasks_processing.go:74] worker 32 stopped. 
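Every gather function logs its duration and record count in a gather.go:180 entry. Summarizing those entries gives a quick profile of the run; a sketch over a saved copy of the log (file name illustrative):

import re

pattern = re.compile(
    r'gatherer "(?P<gatherer>[^"]+)" function "(?P<fn>[^"]+)" '
    r"took (?P<duration>\S+) to process (?P<records>\d+) records"
)

def seconds(d):
    # Go durations in these entries use µs, ms or s suffixes.
    for suffix, factor in (("µs", 1e-6), ("ms", 1e-3), ("s", 1.0)):
        if d.endswith(suffix):
            return float(d[: -len(suffix)]) * factor
    return float("nan")

with open("insights-operator.log") as f:
    rows = [m.groupdict() for m in pattern.finditer(f.read())]

# Print the ten slowest gather functions.
for row in sorted(rows, key=lambda r: seconds(r["duration"]), reverse=True)[:10]:
    print(f'{row["fn"]:45s} {row["duration"]:>12s} {row["records"]:>3s} records')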
I0216 11:12:12.107761 1 gather_sap_vsystem_iptables_logs.go:60] SAP resources weren't found I0216 11:12:12.107771 1 recorder.go:75] Recording config/storage/storageclasses/gp2-csi with fingerprint=90803365e687874cb3d0c08fbe19dcc4b3f42960157891f8dc33aa9303c31626 I0216 11:12:12.107784 1 recorder.go:75] Recording config/storage/storageclasses/gp3-csi with fingerprint=af905d1f8d7ad983767a7cce02a439797d55946977a201c824fd3b9301ae31bb I0216 11:12:12.107788 1 gather.go:180] gatherer "clusterconfig" function "storage_classes" took 44.06335ms to process 2 records I0216 11:12:12.107792 1 gather.go:180] gatherer "clusterconfig" function "sap_pods" took 41.754338ms to process 0 records I0216 11:12:12.107795 1 gather.go:180] gatherer "clusterconfig" function "machine_sets" took 42.240567ms to process 0 records I0216 11:12:12.107805 1 tasks_processing.go:74] worker 28 stopped. I0216 11:12:12.107810 1 tasks_processing.go:74] worker 37 stopped. I0216 11:12:12.107813 1 tasks_processing.go:74] worker 57 stopped. I0216 11:12:12.107871 1 tasks_processing.go:74] worker 16 stopped. I0216 11:12:12.108005 1 recorder.go:75] Recording config/oauth with fingerprint=bfc44f5b3d43b46f86bf06fd2291c6a3f8866f2107973733938c85ff7189885a I0216 11:12:12.108016 1 gather.go:180] gatherer "clusterconfig" function "oauths" took 45.086568ms to process 1 records I0216 11:12:12.108022 1 gather.go:180] gatherer "clusterconfig" function "sap_license_management_logs" took 36.797466ms to process 0 records I0216 11:12:12.108029 1 controller.go:119] Initializing last reported time to 0001-01-01T00:00:00Z I0216 11:12:12.108053 1 controller.go:203] Source periodic-clusterconfig *controllerstatus.Simple is not ready I0216 11:12:12.108059 1 controller.go:203] Source periodic-conditional *controllerstatus.Simple is not ready I0216 11:12:12.108062 1 controller.go:203] Source periodic-workloads *controllerstatus.Simple is not ready I0216 11:12:12.108095 1 controller.go:457] The operator is still being initialized I0216 11:12:12.108032 1 tasks_processing.go:74] worker 14 stopped. I0216 11:12:12.108108 1 controller.go:482] The operator is healthy I0216 11:12:12.108150 1 recorder.go:75] Recording config/ingress with fingerprint=ba589949fe22d9e7161f23cd55f53dfc825ac6a291cabc304b36683fca181056 I0216 11:12:12.108164 1 gather.go:180] gatherer "clusterconfig" function "ingress" took 45.020195ms to process 1 records I0216 11:12:12.108169 1 gather.go:180] gatherer "clusterconfig" function "monitoring_persistent_volumes" took 45.221552ms to process 0 records I0216 11:12:12.108172 1 tasks_processing.go:74] worker 12 stopped. I0216 11:12:12.108175 1 tasks_processing.go:74] worker 56 stopped. I0216 11:12:12.108178 1 tasks_processing.go:74] worker 9 stopped. I0216 11:12:12.108174 1 gather.go:180] gatherer "clusterconfig" function "openstack_dataplanenodesets" took 45.156932ms to process 0 records I0216 11:12:12.109082 1 tasks_processing.go:74] worker 55 stopped. I0216 11:12:12.109678 1 recorder.go:75] Recording config/infrastructure with fingerprint=18127b0a6453fb513fc10b2e0e4076afc55b213ba4af115daf6cad3aa3cd5310 I0216 11:12:12.109691 1 gather.go:180] gatherer "clusterconfig" function "infrastructures" took 43.798707ms to process 1 records I0216 11:12:12.109762 1 recorder.go:75] Recording config/pdbs/openshift-image-registry/image-registry with fingerprint=b9e647772be6b86bed7865b9787090a9598c81a85ffe023087a75822d73a8571 I0216 11:12:12.109785 1 tasks_processing.go:74] worker 17 stopped. 
I0216 11:12:12.109796 1 recorder.go:75] Recording config/pdbs/openshift-ingress/router-default with fingerprint=d59c1dad1f91808416f1939fea5207d9232051524b48261c4cf7a72b0443b80c I0216 11:12:12.109815 1 recorder.go:75] Recording config/pdbs/openshift-operator-lifecycle-manager/packageserver-pdb with fingerprint=cc3e7031f37effe64268327d10050e7ae080bc4833a5b593a2cb477113550b63 I0216 11:12:12.109829 1 gather.go:180] gatherer "clusterconfig" function "pdbs" took 46.906098ms to process 3 records I0216 11:12:12.111115 1 tasks_processing.go:74] worker 39 stopped. I0216 11:12:12.111776 1 recorder.go:75] Recording config/node/ip-10-0-140-34.ec2.internal with fingerprint=6c4e76f7dd81e9aa13797f925f93d8b2fc75a22aba29de9153d34247754b3a24 I0216 11:12:12.111913 1 recorder.go:75] Recording config/node/ip-10-0-155-178.ec2.internal with fingerprint=8828b93fa00fb7b64ecc77fd06034a75c3b436794104f097fa02c461a33a03f1 I0216 11:12:12.112012 1 recorder.go:75] Recording config/node/ip-10-0-171-9.ec2.internal with fingerprint=88dedce9fe04abb84191071ee45288949f80b15316e1f2d593405b03fa5c60e6 I0216 11:12:12.112037 1 gather.go:180] gatherer "clusterconfig" function "nodes" took 47.684229ms to process 3 records I0216 11:12:12.113652 1 tasks_processing.go:74] worker 40 stopped. I0216 11:12:12.113752 1 recorder.go:75] Recording config/validatingwebhookconfigurations/multus.openshift.io with fingerprint=e928b9dd321e014fdf722636fdadd2338ce690096db018d205cee95ee9f5c036 I0216 11:12:12.113806 1 recorder.go:75] Recording config/validatingwebhookconfigurations/network-node-identity.openshift.io with fingerprint=b5fb422520bdcd4cfc548da2a620c5443fecc2849fa541e15f3b93a2c134a2c8 I0216 11:12:12.113822 1 recorder.go:75] Recording config/validatingwebhookconfigurations/performance-addon-operator with fingerprint=c7df6dea509f47e5d1f1a480559c16fd52da0c876afdc081280af847be2ba58d I0216 11:12:12.113836 1 recorder.go:75] Recording config/validatingwebhookconfigurations/snapshot.storage.k8s.io with fingerprint=b2b6a6508d383e9bedfeb481397fcf61a64e68828579fa679040a91b76181eef I0216 11:12:12.113860 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-clusterrolebindings-validation with fingerprint=a013475b1fc1abe0dbfc89d0a517f44bf75cf96f42db9d1de1e02a099af6ea6a I0216 11:12:12.113878 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-clusterroles-validation with fingerprint=3915c8abd870aeb5401b1721d3d04c8dd762ad04acf4523bf4d193124f9e6c3f I0216 11:12:12.113897 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-ingress-config-validation with fingerprint=29d3ebeb8762c0dc3b4b187352c1dcb2a8c99ed115d121d659ba37cd4120767d I0216 11:12:12.113912 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-network-operator-validation with fingerprint=6b6a5e1645f372a7dc6a217eda4b69999313156e329858b811d202762b8e02fa I0216 11:12:12.113943 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-regular-user-validation with fingerprint=7959c02642c0f15ba9f7e8b4c5fb2a65629384af5c2bb4ab4522d976af49e9d6 I0216 11:12:12.113960 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-scc-validation with fingerprint=f367105e0fedc9d699e7acd022a9d6244a78f87d3dae4fbaa8ad660d1121eed1 I0216 11:12:12.113977 1 recorder.go:75] Recording config/validatingwebhookconfigurations/sre-serviceaccount-validation with fingerprint=f2cd5fc0092a3ae8fb5e2e0e8288f1ae07ae4ec9f97f9ac567f59501986eda52 I0216 11:12:12.113993 1 recorder.go:75] Recording 
config/validatingwebhookconfigurations/sre-techpreviewnoupgrade-validation with fingerprint=35513c5023b3c96d1e100731a6d56204c9b33cc2e6e4bcd217a7709049b16352 I0216 11:12:12.114000 1 gather.go:180] gatherer "clusterconfig" function "validating_webhook_configurations" took 48.469235ms to process 12 records I0216 11:12:12.116125 1 tasks_processing.go:74] worker 26 stopped. I0216 11:12:12.116627 1 recorder.go:75] Recording config/clusteroperator/imageregistry.operator.openshift.io/config/cluster with fingerprint=333a9fe219db9e7008c45ee355a761049e3aa5f08dc4c69d3c8a576acd64d09a I0216 11:12:12.116642 1 gather.go:180] gatherer "clusterconfig" function "image_registries" took 50.299541ms to process 1 records I0216 11:12:12.118559 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file I0216 11:12:12.118560 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file I0216 11:12:12.118619 1 shared_informer.go:318] Caches are synced for RequestHeaderAuthRequestController I0216 11:12:12.126930 1 tasks_processing.go:74] worker 53 stopped. I0216 11:12:12.126942 1 gather.go:180] gatherer "clusterconfig" function "openshift_machine_api_events" took 61.483567ms to process 0 records I0216 11:12:12.127982 1 base_controller.go:73] Caches are synced for ConfigController I0216 11:12:12.127996 1 base_controller.go:110] Starting #1 worker of ConfigController controller ... I0216 11:12:12.129076 1 tasks_processing.go:74] worker 41 stopped. I0216 11:12:12.129139 1 recorder.go:75] Recording config/schedulers/cluster with fingerprint=ac978f0b93bee738562d2e1c29c8cc37e40baa32dd662a6285423f416343f7ae I0216 11:12:12.129151 1 gather.go:180] gatherer "clusterconfig" function "schedulers" took 63.625462ms to process 1 records I0216 11:12:12.129220 1 tasks_processing.go:74] worker 36 stopped. I0216 11:12:12.129228 1 gather.go:180] gatherer "clusterconfig" function "node_logs" took 64.874471ms to process 0 records I0216 11:12:12.131481 1 tasks_processing.go:74] worker 52 stopped. I0216 11:12:12.131635 1 recorder.go:75] Recording config/featuregate with fingerprint=f07d19e765483ac4b1f601b21852450c5eba6538f79c8e2b74430ccaa21a29ea I0216 11:12:12.131651 1 gather.go:180] gatherer "clusterconfig" function "feature_gates" took 66.049806ms to process 1 records I0216 11:12:12.133154 1 tasks_processing.go:74] worker 25 stopped. I0216 11:12:12.133459 1 recorder.go:75] Recording config/apiserver with fingerprint=6cad8ca9e7586ea82c5ac51d3068bff5f37a439dd6f5469a48797c01d14e0090 I0216 11:12:12.133476 1 gather.go:180] gatherer "clusterconfig" function "cluster_apiserver" took 69.273281ms to process 1 records I0216 11:12:12.133850 1 tasks_processing.go:74] worker 22 stopped. I0216 11:12:12.133867 1 gather.go:180] gatherer "clusterconfig" function "certificate_signing_requests" took 70.071558ms to process 0 records I0216 11:12:12.134007 1 tasks_processing.go:74] worker 5 stopped. W0216 11:12:12.134024 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again. I0216 11:12:12.134028 1 recorder.go:75] Recording config/namespaces_with_overlapping_uids with fingerprint=4f53cda18c2baa0c0354bb5f9a3ecbe5ed12ab4d8e11ba873c2f11161202b945 I0216 11:12:12.134037 1 gather.go:180] gatherer "clusterconfig" function "overlapping_namespace_uids" took 63.128632ms to process 1 records I0216 11:12:12.135798 1 sca.go:98] Pulling SCA certificates from https://api.openshift.com/api/accounts_mgmt/v1/certificates. 
Next check is in 8h0m0s I0216 11:12:12.135856 1 cluster_transfer.go:78] checking the availability of cluster transfer. Next check is in 12h0m0s I0216 11:12:12.135895 1 tasks_processing.go:74] worker 18 stopped. W0216 11:12:12.135913 1 operator.go:286] started I0216 11:12:12.135913 1 gather.go:180] gatherer "clusterconfig" function "aggregated_monitoring_cr_names" took 73.197716ms to process 0 records I0216 11:12:12.135922 1 base_controller.go:67] Waiting for caches to sync for LoggingSyncer I0216 11:12:12.137656 1 gather_logs.go:145] no pods in openshift-authentication namespace were found I0216 11:12:12.137669 1 tasks_processing.go:74] worker 42 stopped. I0216 11:12:12.137674 1 gather.go:180] gatherer "clusterconfig" function "openshift_authentication_logs" took 73.052325ms to process 0 records I0216 11:12:12.137703 1 tasks_processing.go:74] worker 2 stopped. I0216 11:12:12.137978 1 recorder.go:75] Recording config/version with fingerprint=7a45e18531b97e9b78896c9ff358df500fa54583e58b6f3e7517dcecef20f4d7 I0216 11:12:12.137990 1 recorder.go:75] Recording config/id with fingerprint=7b936dfcc4304ec905a79d0fc193c09d9caf217aa316ac225868775fe1fb741b I0216 11:12:12.137998 1 gather.go:180] gatherer "clusterconfig" function "version" took 74.077386ms to process 2 records I0216 11:12:12.139022 1 tasks_processing.go:74] worker 0 stopped. I0216 11:12:12.140045 1 recorder.go:75] Recording config/pod/openshift-ovn-kubernetes/ovnkube-node-27brn with fingerprint=0ecd83f8a299efbce45b8072fc7e1e7f9921c748428c8d3303fa981ca7bf6c5a I0216 11:12:12.140089 1 recorder.go:75] Recording config/running_containers with fingerprint=ad6b828a5c55a46bead2914df287611ef48af5256b39df3690e88d6c70a83047 I0216 11:12:12.140097 1 gather.go:180] gatherer "clusterconfig" function "container_images" took 76.008548ms to process 2 records I0216 11:12:12.144831 1 gather_logs.go:145] no pods in openshift-kube-scheduler namespace were found I0216 11:12:12.144845 1 tasks_processing.go:74] worker 31 stopped. I0216 11:12:12.144851 1 gather.go:180] gatherer "clusterconfig" function "scheduler_logs" took 80.889426ms to process 0 records I0216 11:12:12.146706 1 gather_logs.go:145] no pods in namespace were found I0216 11:12:12.146718 1 tasks_processing.go:74] worker 7 stopped. I0216 11:12:12.146723 1 gather.go:180] gatherer "clusterconfig" function "qemu_kubevirt_launcher_logs" took 83.967322ms to process 0 records I0216 11:12:12.146724 1 configmapobserver.go:84] configmaps "insights-config" not found I0216 11:12:12.146914 1 controller.go:203] Source scaController *sca.Controller is not ready I0216 11:12:12.146927 1 controller.go:203] Source clusterTransferController *clustertransfer.Controller is not ready I0216 11:12:12.146933 1 controller.go:203] Source periodic-clusterconfig *controllerstatus.Simple is not ready I0216 11:12:12.146939 1 controller.go:203] Source periodic-conditional *controllerstatus.Simple is not ready I0216 11:12:12.146945 1 controller.go:203] Source periodic-workloads *controllerstatus.Simple is not ready I0216 11:12:12.146961 1 controller.go:457] The operator is still being initialized I0216 11:12:12.146970 1 controller.go:482] The operator is healthy I0216 11:12:12.148999 1 tasks_processing.go:74] worker 48 stopped. 
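The fingerprint attached to each recorded item looks like a SHA-256 digest of the serialized record payload; for example, the openshift-service-ca.crt record a little further down carries e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855, the well-known SHA-256 of empty input. That is an inference from this log rather than a documented contract; a quick way to sanity-check it against a candidate payload:

import hashlib

EMPTY_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
print(hashlib.sha256(b"").hexdigest() == EMPTY_SHA256)  # True

def fingerprint(payload: bytes) -> str:
    # Hypothetical helper mirroring how a record payload would be fingerprinted
    # if the digests really are plain SHA-256 of the stored bytes.
    return hashlib.sha256(payload).hexdigest()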
I0216 11:12:12.149169 1 recorder.go:75] Recording cluster-scoped-resources/rbac.authorization.k8s.io/clusterroles/admin with fingerprint=2d4907f46c2f6c97c8ffccc40464be28da8eaa393894bfcfab6334fec182b034 I0216 11:12:12.149230 1 recorder.go:75] Recording cluster-scoped-resources/rbac.authorization.k8s.io/clusterroles/edit with fingerprint=06de992d2b1733c4c5b775aa277d65780b7428153b9a2c024d6474a0da596045 I0216 11:12:12.149242 1 gather.go:180] gatherer "clusterconfig" function "clusterroles" took 84.069402ms to process 2 records I0216 11:12:12.149988 1 tasks_processing.go:74] worker 50 stopped. I0216 11:12:12.153329 1 recorder.go:75] Recording config/crd/volumesnapshots.snapshot.storage.k8s.io with fingerprint=e8ce138fdb3311845093cb6192fdff72e91bd4c057446f8c231284839859e1fe I0216 11:12:12.153503 1 recorder.go:75] Recording config/crd/volumesnapshotcontents.snapshot.storage.k8s.io with fingerprint=c9d7d8d1ec42733dc09bd586812c16487783dbb663f28b797b774abdfdc68407 I0216 11:12:12.153512 1 gather.go:180] gatherer "clusterconfig" function "crds" took 84.551337ms to process 2 records I0216 11:12:12.155097 1 requests.go:204] Asking for SCA certificate for x86_64 architecture I0216 11:12:12.155443 1 prometheus_rules.go:88] Prometheus rules successfully created I0216 11:12:12.156477 1 tasks_processing.go:74] worker 47 stopped. E0216 11:12:12.156492 1 gather.go:143] gatherer "clusterconfig" function "config_maps" failed with the error: configmaps "cluster-monitoring-config" not found E0216 11:12:12.156499 1 gather.go:143] gatherer "clusterconfig" function "config_maps" failed with the error: configmaps "gateway-mode-config" not found E0216 11:12:12.156502 1 gather.go:143] gatherer "clusterconfig" function "config_maps" failed with the error: configmaps "insights-config" not found I0216 11:12:12.156521 1 recorder.go:75] Recording config/configmaps/openshift-config/kube-root-ca.crt/ca.crt with fingerprint=d476c7d3f5b104863f08f481b1264dcc68cc272ecefb0ecb709b18a6afab034d I0216 11:12:12.156529 1 recorder.go:75] Recording config/configmaps/openshift-config/openshift-install/invoker with fingerprint=76b482f683cd3ef9da02debac5b26080a5aeb06ff768ee5c21117514dff29d8a I0216 11:12:12.156532 1 recorder.go:75] Recording config/configmaps/openshift-config/openshift-install/version with fingerprint=0bddb88b072029f25dde6f44cb877a44fb2f65ed4864939fbf7a3e42c0a485f6 I0216 11:12:12.156536 1 recorder.go:75] Recording config/configmaps/openshift-config/openshift-service-ca.crt/service-ca.crt with fingerprint=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 I0216 11:12:12.156554 1 recorder.go:75] Recording config/configmaps/openshift-config/rosa-brand-logo/rosa-brand-logo.svg with fingerprint=6ed8ca4dd7a8eee7249182bc006e9649ce84d76c551ddfaaa33e55d8c4cc1ed0 I0216 11:12:12.156562 1 recorder.go:75] Recording config/configmaps/kube-system/cluster-config-v1/install-config with fingerprint=ab3811c6b83fd7b8e920094cfa3080d1b4ee3c35ec4c8379437b21d27bd6608d I0216 11:12:12.156566 1 gather.go:180] gatherer "clusterconfig" function "config_maps" took 93.533511ms to process 6 records E0216 11:12:12.157990 1 cluster_transfer.go:90] failed to pull cluster transfer: unable to retrieve cluster transfer data from https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/: Get "https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/?search=cluster_uuid+is+%!c(MISSING)4e6d4c-5438-42b8-a8fd-17aea3f0f4e6%!+(MISSING)and+status+is+%!a(MISSING)ccepted%!"(MISSING): dial tcp: lookup api.openshift.com on 172.30.0.10:53: read 
udp 10.129.0.10:43849->172.30.0.10:53: read: connection refused I0216 11:12:12.158002 1 controllerstatus.go:80] name=clusterTransferController healthy=true reason=Disconnected message=failed to pull cluster transfer: unable to retrieve cluster transfer data from https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/: Get "https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/?search=cluster_uuid+is+%273c4e6d4c-5438-42b8-a8fd-17aea3f0f4e6%27+and+status+is+%27accepted%27": dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.10:43849->172.30.0.10:53: read: connection refused W0216 11:12:12.158026 1 sca.go:117] Failed to pull SCA certs from https://api.openshift.com/api/accounts_mgmt/v1/certificates: unable to retrieve SCA certs data from https://api.openshift.com/api/accounts_mgmt/v1/certificates: Post "https://api.openshift.com/api/accounts_mgmt/v1/certificates": dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.10:43849->172.30.0.10:53: read: connection refused I0216 11:12:12.158039 1 controllerstatus.go:80] name=scaController healthy=true reason=NonHTTPError message=Failed to pull SCA certs from https://api.openshift.com/api/accounts_mgmt/v1/certificates: unable to retrieve SCA certs data from https://api.openshift.com/api/accounts_mgmt/v1/certificates: Post "https://api.openshift.com/api/accounts_mgmt/v1/certificates": dial tcp: lookup api.openshift.com on 172.30.0.10:53: read udp 10.129.0.10:43849->172.30.0.10:53: read: connection refused I0216 11:12:12.159851 1 tasks_processing.go:74] worker 6 stopped. I0216 11:12:12.159865 1 gather.go:180] gatherer "clusterconfig" function "number_of_pods_and_netnamespaces_with_sdn_annotations" took 95.056481ms to process 0 records I0216 11:12:12.170487 1 tasks_processing.go:74] worker 35 stopped. E0216 11:12:12.170499 1 gather.go:143] gatherer "clusterconfig" function "ingress_certificates" failed with the error: failed to get secret 'router-certs-default' in namespace 'openshift-ingress': secrets "router-certs-default" not found E0216 11:12:12.170505 1 gather.go:143] gatherer "clusterconfig" function "ingress_certificates" failed with the error: failed to get secret '2ogaijovdnqto6f3h1skd20ln5vofufh-primary-cert-bundle-secret' in namespace 'openshift-ingress-operator': secrets "2ogaijovdnqto6f3h1skd20ln5vofufh-primary-cert-bundle-secret" not found I0216 11:12:12.170552 1 recorder.go:75] Recording aggregated/ingress_controllers_certs with fingerprint=5b423e626f08cb5155cbf882f625ae867b4d5b3135b5b2c181f1814aa0bd7b1e I0216 11:12:12.170564 1 gather.go:180] gatherer "clusterconfig" function "ingress_certificates" took 106.469975ms to process 1 records I0216 11:12:12.236775 1 base_controller.go:73] Caches are synced for LoggingSyncer I0216 11:12:12.236792 1 base_controller.go:110] Starting #1 worker of LoggingSyncer controller ... W0216 11:12:13.132670 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again. I0216 11:12:13.341782 1 gather_cluster_operator_pods_and_events.go:119] Found 20 pods with 24 containers I0216 11:12:13.341794 1 gather_cluster_operator_pods_and_events.go:233] Maximum buffer size: 1048576 bytes I0216 11:12:13.342174 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns container dns-default-6vg7p pod in namespace openshift-dns (previous: false). 
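The cluster-transfer and SCA pulls fail for the same underlying reason: the in-cluster resolver at 172.30.0.10:53 refuses connections, so api.openshift.com cannot be resolved (consistent with the openshift-dns pods still in ContainerCreating further down). A minimal resolution check, meant to run from the same cluster network the operator uses (for example from a debug pod); a sketch:

import socket

for host in ("api.openshift.com", "console.redhat.com"):
    try:
        infos = socket.getaddrinfo(host, 443, proto=socket.IPPROTO_TCP)
        print(host, sorted({info[4][0] for info in infos}))
    except socket.gaierror as err:
        print(host, "resolution failed:", err)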
I0216 11:12:13.360470 1 gather_cluster_operators.go:184] Unable to get configs.samples.operator.openshift.io resource due to: configs.samples.operator.openshift.io "cluster" not found
I0216 11:12:13.572347 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-6vg7p pod in namespace openshift-dns for failing operator dns (previous: false): "container \"dns\" in pod \"dns-default-6vg7p\" is waiting to start: ContainerCreating"
I0216 11:12:13.572362 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"dns\" in pod \"dns-default-6vg7p\" is waiting to start: ContainerCreating"
I0216 11:12:13.572369 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-rbac-proxy container dns-default-6vg7p pod in namespace openshift-dns (previous: false).
I0216 11:12:13.744332 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-6vg7p pod in namespace openshift-dns for failing operator kube-rbac-proxy (previous: false): "container \"kube-rbac-proxy\" in pod \"dns-default-6vg7p\" is waiting to start: ContainerCreating"
I0216 11:12:13.744362 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"kube-rbac-proxy\" in pod \"dns-default-6vg7p\" is waiting to start: ContainerCreating"
I0216 11:12:13.744380 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns container dns-default-ph74l pod in namespace openshift-dns (previous: false).
I0216 11:12:13.963594 1 tasks_processing.go:74] worker 58 stopped.
I0216 11:12:13.963638 1 recorder.go:75] Recording config/clusteroperator/console with fingerprint=664e42db6145e2b5e5db21eae9ee048d1e65632fe3706eca1011b7b65d39b2e6
I0216 11:12:13.963668 1 recorder.go:75] Recording config/clusteroperator/csi-snapshot-controller with fingerprint=978d1d700ca9475b36b1d86961be2b7237c04ff9b282eb6a28453d149c79f3a9
I0216 11:12:13.963706 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/csisnapshotcontroller/cluster with fingerprint=5adc514f4b63e2f1ecc68bf6f9c0af70c5eea04522a49524e102721b1c41f80e
I0216 11:12:13.963730 1 recorder.go:75] Recording config/clusteroperator/dns with fingerprint=680ae9bba0b6af2f45a8ba8d3c313384997bdbb11b4ec8484930b2f4402fb50c
I0216 11:12:13.963746 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/dns/default with fingerprint=9e7b4ce029030d3d8c3b49af92c556acdcc415000b40d3f969dbdc42c432b47f
I0216 11:12:13.963763 1 recorder.go:75] Recording config/clusteroperator/image-registry with fingerprint=3fa2aefecea1476fdb0c88235cfda79e5110efb412f82cf3e925f99a0113ad6a
I0216 11:12:13.963780 1 recorder.go:75] Recording config/clusteroperator/ingress with fingerprint=88191ee1ac56930db8219f5b60be474cd3d8c0d31c94045032e2bc91d5e223ec
I0216 11:12:13.963800 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/ingresscontroller/openshift-ingress-operator/default with fingerprint=3d8b67d4da3fbf9c65dd223c25ec54ebb6c5835738008d54903a434fc5bc330f
I0216 11:12:13.963816 1 recorder.go:75] Recording config/clusteroperator/insights with fingerprint=2c9ee095f169a18d800f34ebe8a68ff7fb2fbfad4c6996b7d0b652d68b47ffdd
I0216 11:12:13.963829 1 recorder.go:75] Recording config/clusteroperator/kube-apiserver with fingerprint=b436bc84baa3083d8353d27eb6c24ded667845bc0a393fefc40083cc8059a0cb
I0216 11:12:13.963838 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubeapiserver/cluster with fingerprint=51503bf0b784fcf65ea46bcaf1f72ac1a5c4d5dc211934f18f27871efed05762
I0216 11:12:13.963848 1 recorder.go:75] Recording config/clusteroperator/kube-controller-manager with fingerprint=7ead93140f8486542f68704bc350cc0ca0f9c90de442c22ba5f77bacb25df547
I0216 11:12:13.963858 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubecontrollermanager/cluster with fingerprint=ce90c0d4f367d7da085074268031798382ae7c54fdcb0a21f15a4818fe308c11
I0216 11:12:13.963868 1 recorder.go:75] Recording config/clusteroperator/kube-scheduler with fingerprint=5307b5beef6a145f6c911e1452d48fa7fbdf861401dcf63523d1afaaa7cbc7ac
I0216 11:12:13.963884 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubescheduler/cluster with fingerprint=f2940fb9fd20c19951dfc295eb363b7fba0c505f5ae61f01967a063099e6b60a
I0216 11:12:13.963902 1 recorder.go:75] Recording config/clusteroperator/kube-storage-version-migrator with fingerprint=c2d03141f47d5e52d0f37fedb39897b55c99db93ab10e30269d7f7af0262a12d
I0216 11:12:13.963911 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/kubestorageversionmigrator/cluster with fingerprint=9351181aa7e6ada41ef581ab31e13516c6b934cc95710154bafb2eb222cb58db
I0216 11:12:13.963920 1 recorder.go:75] Recording config/clusteroperator/monitoring with fingerprint=e82a837222c184d7081d0d81df6f0856b9db56d5937b9aca8cccbf644c2351cd
I0216 11:12:13.963989 1 recorder.go:75] Recording config/clusteroperator/network with fingerprint=f33bc92d3e33147c85a3f4afdf35bc69c4a16e9b5ec9d227f85ed8de617f37c6
I0216 11:12:13.963998 1 recorder.go:75] Recording config/clusteroperator/network.operator.openshift.io/operatorpki/openshift-ovn-kubernetes/ovn with fingerprint=626a89d20e0deaed5b6dfb533acfe65f4bb1618bd200a703b62e60c5d16d94ab
I0216 11:12:13.964004 1 recorder.go:75] Recording config/clusteroperator/network.operator.openshift.io/operatorpki/openshift-ovn-kubernetes/signer with fingerprint=90410b16914712b85b3c4578716ad8c0ae072e688f4cd1e022bf76f20da3506d
I0216 11:12:13.964024 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/network/cluster with fingerprint=a93d15eaecb455a0e40ecb2826eeecc1533899204ddd3c3921d15ab70af7ae75
I0216 11:12:13.964040 1 recorder.go:75] Recording config/clusteroperator/node-tuning with fingerprint=78c8743eedb0a255f2037d6acfe38efda2ab7d55115e316d1eb9eb34c5444e8c
I0216 11:12:13.964058 1 recorder.go:75] Recording config/clusteroperator/openshift-apiserver with fingerprint=2cf6e5d9b4a93874249553216a42d666652d24dceb50794f84b9ec02eabbff75
I0216 11:12:13.964067 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/openshiftapiserver/cluster with fingerprint=e712e6cf27339b441e4ed1f4cde91dbde7e952698ba93407e4457db63a4a4c76
I0216 11:12:13.964077 1 recorder.go:75] Recording config/clusteroperator/openshift-controller-manager with fingerprint=195cdd17414b361fe51d304dc7d60ec58d9ea4fc206c3190f4d682b4af634345
I0216 11:12:13.964087 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/openshiftcontrollermanager/cluster with fingerprint=d71a0f4672f9b45d9fc8293bf1687afc650fd28d32e2e30de27523fe7b4eadf7
I0216 11:12:13.964095 1 recorder.go:75] Recording config/clusteroperator/openshift-samples with fingerprint=f2c87455ea68dda1551b6a4c6ec4753b462a21b39f56a89b2e9099ecae1793f1
I0216 11:12:13.964103 1 recorder.go:75] Recording config/clusteroperator/operator-lifecycle-manager with fingerprint=44a65aa1d292aa5036797b178cd9d7d4c91b883b2229a4ab213acf448307c7bd
I0216 11:12:13.964112 1 recorder.go:75] Recording config/clusteroperator/operator-lifecycle-manager-catalog with fingerprint=d3b47de234aed1ce9a416c6ff0f4d97b0368a284916e6528d4fde835f6c03b29
I0216 11:12:13.964126 1 recorder.go:75] Recording config/clusteroperator/operator-lifecycle-manager-packageserver with fingerprint=a412d34e7ff0545e02e7e308fc96725bd5e062b99f8125841cd9495130b4ddb5
I0216 11:12:13.964142 1 recorder.go:75] Recording config/clusteroperator/service-ca with fingerprint=e14d95d9102085fce0acdcdd1a812f1895a6e4b562ab1d954b93b0b61affee3d
I0216 11:12:13.964148 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/serviceca/cluster with fingerprint=812f7edc2cdb30e61e7f2b29454357a40b1a507a4b0c2b7729193b67f0e3b4aa
I0216 11:12:13.964172 1 recorder.go:75] Recording config/clusteroperator/storage with fingerprint=cf69aa61dbb2173e539c95a44e00f20f5955478f33f9bdad40cbad8ab71b4f90
I0216 11:12:13.964184 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/clustercsidriver/ebs.csi.aws.com with fingerprint=7e1ab8f8cfcd9d249b5b213939fe5144bb83db3725475461728bea44a002c3be
I0216 11:12:13.964189 1 recorder.go:75] Recording config/clusteroperator/operator.openshift.io/storage/cluster with fingerprint=8e480f8c1ce1b39baac42d8ec780c57c2592929ae0c801b61ffad49ba13f33ad
I0216 11:12:13.964195 1 gather.go:180] gatherer "clusterconfig" function "operators" took 1.899580036s to process 36 records
I0216 11:12:13.988837 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-ph74l pod in namespace openshift-dns for failing operator dns (previous: false): "container \"dns\" in pod \"dns-default-ph74l\" is waiting to start: ContainerCreating"
I0216 11:12:13.988850 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"dns\" in pod \"dns-default-ph74l\" is waiting to start: ContainerCreating"
I0216 11:12:13.988857 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-rbac-proxy container dns-default-ph74l pod in namespace openshift-dns (previous: false).
W0216 11:12:14.132733 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again.
I0216 11:12:14.157661 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-ph74l pod in namespace openshift-dns for failing operator kube-rbac-proxy (previous: false): "container \"kube-rbac-proxy\" in pod \"dns-default-ph74l\" is waiting to start: ContainerCreating"
I0216 11:12:14.157677 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"kube-rbac-proxy\" in pod \"dns-default-ph74l\" is waiting to start: ContainerCreating"
I0216 11:12:14.157686 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns container dns-default-s9cbj pod in namespace openshift-dns (previous: false).
I0216 11:12:14.367812 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-s9cbj pod in namespace openshift-dns for failing operator dns (previous: false): "container \"dns\" in pod \"dns-default-s9cbj\" is waiting to start: ContainerCreating"
I0216 11:12:14.367828 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"dns\" in pod \"dns-default-s9cbj\" is waiting to start: ContainerCreating"
I0216 11:12:14.367835 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-rbac-proxy container dns-default-s9cbj pod in namespace openshift-dns (previous: false).
I0216 11:12:14.552997 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for dns-default-s9cbj pod in namespace openshift-dns for failing operator kube-rbac-proxy (previous: false): "container \"kube-rbac-proxy\" in pod \"dns-default-s9cbj\" is waiting to start: ContainerCreating"
I0216 11:12:14.553016 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"kube-rbac-proxy\" in pod \"dns-default-s9cbj\" is waiting to start: ContainerCreating"
I0216 11:12:14.553029 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns-node-resolver container node-resolver-8sngp pod in namespace openshift-dns (previous: false).
I0216 11:12:14.746599 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 11:12:14.746620 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns-node-resolver container node-resolver-fl9t7 pod in namespace openshift-dns (previous: false).
I0216 11:12:14.944592 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 11:12:14.944608 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for dns-node-resolver container node-resolver-sp97d pod in namespace openshift-dns (previous: false).
W0216 11:12:15.132977 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again.
I0216 11:12:15.143419 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 11:12:15.143431 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for registry container image-registry-7cdd964f98-h72zg pod in namespace openshift-image-registry (previous: false).
I0216 11:12:15.344675 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for image-registry-7cdd964f98-h72zg pod in namespace openshift-image-registry for failing operator registry (previous: false): "container \"registry\" in pod \"image-registry-7cdd964f98-h72zg\" is waiting to start: ContainerCreating"
I0216 11:12:15.344690 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"registry\" in pod \"image-registry-7cdd964f98-h72zg\" is waiting to start: ContainerCreating"
I0216 11:12:15.344699 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for registry container image-registry-cfdf965b5-fgvww pod in namespace openshift-image-registry (previous: false).
I0216 11:12:15.547262 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for image-registry-cfdf965b5-fgvww pod in namespace openshift-image-registry for failing operator registry (previous: false): "container \"registry\" in pod \"image-registry-cfdf965b5-fgvww\" is waiting to start: ContainerCreating"
I0216 11:12:15.547277 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"registry\" in pod \"image-registry-cfdf965b5-fgvww\" is waiting to start: ContainerCreating"
I0216 11:12:15.547286 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for registry container image-registry-cfdf965b5-swnhj pod in namespace openshift-image-registry (previous: false).
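[Editor's note] The recurring "Failed to fetch log ... is waiting to start: ContainerCreating" entries above come from the Kubernetes API server itself, which refuses to serve logs for a container that has not started yet. The sketch below is a minimal, hedged illustration of the same kind of request using client-go; it is not the operator's code, and the pod, namespace, and container names are taken only from the log lines above for illustration.

    package main

    import (
        "context"
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/rest"
    )

    func main() {
        // In-cluster configuration, as a pod such as the insights-operator would use.
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        client, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }

        // Request current (not previous) logs of one container. While the
        // container is still in ContainerCreating, the API server answers with
        // an error like: container "dns" in pod "dns-default-6vg7p" is waiting
        // to start: ContainerCreating
        req := client.CoreV1().Pods("openshift-dns").GetLogs("dns-default-6vg7p", &corev1.PodLogOptions{
            Container: "dns",
            Previous:  false,
        })
        data, err := req.DoRaw(context.TODO())
        if err != nil {
            fmt.Println("log fetch failed:", err)
            return
        }
        fmt.Printf("fetched %d bytes of logs\n", len(data))
    }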
I0216 11:12:15.743529 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for image-registry-cfdf965b5-swnhj pod in namespace openshift-image-registry for failing operator registry (previous: false): "container \"registry\" in pod \"image-registry-cfdf965b5-swnhj\" is waiting to start: ContainerCreating"
I0216 11:12:15.743545 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"registry\" in pod \"image-registry-cfdf965b5-swnhj\" is waiting to start: ContainerCreating"
I0216 11:12:15.743554 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for node-ca container node-ca-8cd6t pod in namespace openshift-image-registry (previous: false).
I0216 11:12:15.946311 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 11:12:15.946328 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for node-ca container node-ca-n46rd pod in namespace openshift-image-registry (previous: false).
W0216 11:12:16.132243 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again.
I0216 11:12:16.144800 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 11:12:16.144813 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for node-ca container node-ca-qk5zv pod in namespace openshift-image-registry (previous: false).
I0216 11:12:16.344669 1 gather_cluster_operator_pods_and_events.go:278] Error: "log buffer is empty"
I0216 11:12:16.344682 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for router container router-default-678dd5994f-8b895 pod in namespace openshift-ingress (previous: false).
I0216 11:12:16.546254 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for router-default-678dd5994f-8b895 pod in namespace openshift-ingress for failing operator router (previous: false): "container \"router\" in pod \"router-default-678dd5994f-8b895\" is waiting to start: ContainerCreating"
I0216 11:12:16.546270 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"router\" in pod \"router-default-678dd5994f-8b895\" is waiting to start: ContainerCreating"
I0216 11:12:16.546279 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for router container router-default-678dd5994f-p57tl pod in namespace openshift-ingress (previous: false).
I0216 11:12:16.744903 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for router-default-678dd5994f-p57tl pod in namespace openshift-ingress for failing operator router (previous: false): "container \"router\" in pod \"router-default-678dd5994f-p57tl\" is waiting to start: ContainerCreating"
I0216 11:12:16.744918 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"router\" in pod \"router-default-678dd5994f-p57tl\" is waiting to start: ContainerCreating"
I0216 11:12:16.744926 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for router container router-default-7cf598948f-rn2h4 pod in namespace openshift-ingress (previous: false).
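[Editor's note] The once-per-second "Failed to read the DVO metrics. Trying again." warnings, which end just below with "DVO metrics service was not available within the 5s timeout: context deadline exceeded", are consistent with a retry loop bounded by a 5-second context. The following is a rough sketch of that pattern, not the gatherer's actual implementation; only the endpoint URL and the 5s timeout come from the log, the 1s retry interval and the loop structure are assumptions.

    package main

    import (
        "context"
        "fmt"
        "io"
        "net/http"
        "time"
    )

    func main() {
        // Overall budget for reaching the DVO metrics service.
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        url := "http://deployment-validation-operator-metrics.openshift-deployment-validation-operator.svc:8383"

        for {
            req, _ := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
            resp, err := http.DefaultClient.Do(req)
            if err == nil {
                defer resp.Body.Close()
                body, _ := io.ReadAll(resp.Body)
                fmt.Printf("read %d bytes of DVO metrics\n", len(body))
                return
            }
            fmt.Println("Failed to read the DVO metrics. Trying again.")
            select {
            case <-ctx.Done():
                // Surfaces as "context deadline exceeded" in the gatherer error.
                fmt.Println("DVO metrics service was not available within the 5s timeout:", ctx.Err())
                return
            case <-time.After(time.Second):
            }
        }
    }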
I0216 11:12:16.952558 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for router-default-7cf598948f-rn2h4 pod in namespace openshift-ingress for failing operator router (previous: false): "container \"router\" in pod \"router-default-7cf598948f-rn2h4\" is waiting to start: ContainerCreating"
I0216 11:12:16.952571 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"router\" in pod \"router-default-7cf598948f-rn2h4\" is waiting to start: ContainerCreating"
I0216 11:12:16.952590 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for serve-healthcheck-canary container ingress-canary-8tzg5 pod in namespace openshift-ingress-canary (previous: false).
W0216 11:12:17.131677 1 gather_dvo_metrics.go:210] Failed to read the DVO metrics. Trying again.
W0216 11:12:17.131700 1 gather_dvo_metrics.go:117] Unable to read metrics from endpoint "http://deployment-validation-operator-metrics.openshift-deployment-validation-operator.svc:8383": DVO metrics service was not available within the 5s timeout: context deadline exceeded
I0216 11:12:17.131710 1 tasks_processing.go:74] worker 54 stopped.
E0216 11:12:17.131719 1 gather.go:143] gatherer "clusterconfig" function "dvo_metrics" failed with the error: DVO metrics service was not available within the 5s timeout: context deadline exceeded
I0216 11:12:17.131728 1 recorder.go:75] Recording config/dvo_metrics with fingerprint=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
W0216 11:12:17.131740 1 gather.go:158] issue recording gatherer "clusterconfig" function "dvo_metrics" result "config/dvo_metrics" because of the warning: warning: the record with the same fingerprint "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" was already recorded at path "config/configmaps/openshift-config/openshift-service-ca.crt/service-ca.crt", recording another one with a different path "config/dvo_metrics"
I0216 11:12:17.131747 1 gather.go:180] gatherer "clusterconfig" function "dvo_metrics" took 5.066263318s to process 1 records
I0216 11:12:17.143608 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for ingress-canary-8tzg5 pod in namespace openshift-ingress-canary for failing operator serve-healthcheck-canary (previous: false): "container \"serve-healthcheck-canary\" in pod \"ingress-canary-8tzg5\" is waiting to start: ContainerCreating"
I0216 11:12:17.143623 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"serve-healthcheck-canary\" in pod \"ingress-canary-8tzg5\" is waiting to start: ContainerCreating"
I0216 11:12:17.143632 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for serve-healthcheck-canary container ingress-canary-bqm7r pod in namespace openshift-ingress-canary (previous: false).
I0216 11:12:17.344092 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for ingress-canary-bqm7r pod in namespace openshift-ingress-canary for failing operator serve-healthcheck-canary (previous: false): "container \"serve-healthcheck-canary\" in pod \"ingress-canary-bqm7r\" is waiting to start: ContainerCreating"
I0216 11:12:17.344107 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"serve-healthcheck-canary\" in pod \"ingress-canary-bqm7r\" is waiting to start: ContainerCreating"
I0216 11:12:17.344115 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for serve-healthcheck-canary container ingress-canary-wnxtr pod in namespace openshift-ingress-canary (previous: false).
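[Editor's note] The duplicate-fingerprint warning above is easier to read once you notice that e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 is the SHA-256 digest of empty input. Assuming the recorder fingerprints each record by hashing its payload (an assumption; the exact mechanism is not shown in this log), the warning means two empty records were written: the dvo_metrics record produced after the timeout and the earlier service-ca.crt record. A one-liner to confirm the digest:

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func main() {
        // SHA-256 of zero bytes:
        // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
        fmt.Printf("%x\n", sha256.Sum256(nil))
    }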
I0216 11:12:17.543909 1 gather_cluster_operator_pods_and_events.go:406] Failed to fetch log for ingress-canary-wnxtr pod in namespace openshift-ingress-canary for failing operator serve-healthcheck-canary (previous: false): "container \"serve-healthcheck-canary\" in pod \"ingress-canary-wnxtr\" is waiting to start: ContainerCreating"
I0216 11:12:17.543927 1 gather_cluster_operator_pods_and_events.go:278] Error: "container \"serve-healthcheck-canary\" in pod \"ingress-canary-wnxtr\" is waiting to start: ContainerCreating"
I0216 11:12:17.543936 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for migrator container migrator-6f6b87f846-mqxbd pod in namespace openshift-kube-storage-version-migrator (previous: false).
I0216 11:12:17.745120 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for graceful-termination container migrator-6f6b87f846-mqxbd pod in namespace openshift-kube-storage-version-migrator (previous: false).
I0216 11:12:17.944461 1 gather_cluster_operator_pods_and_events.go:363] Fetching logs for kube-storage-version-migrator-operator container kube-storage-version-migrator-operator-85dccb8957-rqtzn pod in namespace openshift-kube-storage-version-migrator-operator (previous: false).
I0216 11:12:18.145708 1 tasks_processing.go:74] worker 11 stopped.
I0216 11:12:18.145805 1 recorder.go:75] Recording events/openshift-dns-operator with fingerprint=acaaef6db0c996235fec161f2844ccefb0c184df9f32e4160f9f2e9ec90c296d
I0216 11:12:18.145859 1 recorder.go:75] Recording events/openshift-dns with fingerprint=5e4d28cfd9e423d73a42e126c0b44584845359ac3683a911ea74323987e2bda7
I0216 11:12:18.145915 1 recorder.go:75] Recording events/openshift-image-registry with fingerprint=4bde33d36dbd7574a77867f10832f5cc05a86acb675da1aace65c47836786eda
I0216 11:12:18.145938 1 recorder.go:75] Recording events/openshift-ingress-operator with fingerprint=bbf799ee5b4aab14dbbed6266c12746d6ab6cdd4c864410e9cee500d042d7737
I0216 11:12:18.145974 1 recorder.go:75] Recording events/openshift-ingress with fingerprint=55b8c608d108eaddf3780591048039caf46d82e5abd6be47c52a91941bb86457
I0216 11:12:18.145992 1 recorder.go:75] Recording events/openshift-ingress-canary with fingerprint=7840c938df834ef1f108cf51021219608b0aec76ca5fef8924a6016ab496cd0f
I0216 11:12:18.146017 1 recorder.go:75] Recording events/openshift-kube-storage-version-migrator with fingerprint=f7deca3aed611ddd25459dee5df7e8b501157b704bd9397d43ecffa34004351d
I0216 11:12:18.146056 1 recorder.go:75] Recording events/openshift-kube-storage-version-migrator-operator with fingerprint=6c01dfd81e2f9a87f5bc08187df106b18da39b5afc10efabc80d399bed7d8089
I0216 11:12:18.146068 1 recorder.go:75] Recording config/pod/openshift-kube-storage-version-migrator/logs/migrator-6f6b87f846-mqxbd/migrator_current.log with fingerprint=bc01cb805c687cb3ec09d5d1cfbb09d4e609023fb0a3d65c78ff4dd51a255bb3
I0216 11:12:18.146075 1 recorder.go:75] Recording config/pod/openshift-kube-storage-version-migrator/logs/migrator-6f6b87f846-mqxbd/graceful-termination_current.log with fingerprint=a2eee3a01941f5f16d0fb46cc799c97fc6cff9839cd5f19048ad2aa03304ff71
I0216 11:12:18.146099 1 recorder.go:75] Recording config/pod/openshift-kube-storage-version-migrator-operator/logs/kube-storage-version-migrator-operator-85dccb8957-rqtzn/kube-storage-version-migrator-operator_current.log with fingerprint=3ec01ec937ad6e794ba33eefe573856fe71469e5e824c0c5eddc33c71ef5f64f
I0216 11:12:18.146110 1 gather.go:180] gatherer "clusterconfig" function "operators_pods_and_events" took 6.0829234s to process 11 records
I0216 11:12:22.940204 1 configmapobserver.go:84] configmaps "insights-config" not found
I0216 11:12:23.137024 1 configmapobserver.go:84] configmaps "insights-config" not found
I0216 11:12:24.718246 1 tasks_processing.go:74] worker 62 stopped.
I0216 11:12:24.718274 1 recorder.go:75] Recording config/installplans with fingerprint=7b887df561a3a9e6ef0dc672845aa5d56e348505006b7496d3a2f83892b0c95b
I0216 11:12:24.718285 1 gather.go:180] gatherer "clusterconfig" function "install_plans" took 12.653549504s to process 1 records
I0216 11:12:25.471807 1 tasks_processing.go:74] worker 63 stopped.
I0216 11:12:25.472010 1 recorder.go:75] Recording config/serviceaccounts with fingerprint=27f0515cb73dfa1cfff2c918a43fe6fd5d19946c2494addd5bcc80f8aa14116a
I0216 11:12:25.472025 1 gather.go:180] gatherer "clusterconfig" function "service_accounts" took 13.408049132s to process 1 records
E0216 11:12:25.472074 1 periodic.go:252] clusterconfig failed after 13.409s with: function "machines" failed with an error, function "machine_healthchecks" failed with an error, function "pod_network_connectivity_checks" failed with an error, function "support_secret" failed with an error, function "config_maps" failed with an error, function "ingress_certificates" failed with an error, function "dvo_metrics" failed with an error
I0216 11:12:25.472088 1 controllerstatus.go:89] name=periodic-clusterconfig healthy=false reason=PeriodicGatherFailed message=Source clusterconfig could not be retrieved: function "machines" failed with an error, function "machine_healthchecks" failed with an error, function "pod_network_connectivity_checks" failed with an error, function "support_secret" failed with an error, function "config_maps" failed with an error, function "ingress_certificates" failed with an error, function "dvo_metrics" failed with an error
I0216 11:12:25.472094 1 periodic.go:214] Running workloads gatherer
I0216 11:12:25.472106 1 tasks_processing.go:45] number of workers: 2
I0216 11:12:25.472115 1 tasks_processing.go:69] worker 1 listening for tasks.
I0216 11:12:25.472118 1 tasks_processing.go:71] worker 1 working on workload_info task.
I0216 11:12:25.472129 1 tasks_processing.go:69] worker 0 listening for tasks.
I0216 11:12:25.472199 1 tasks_processing.go:71] worker 0 working on helmchart_info task.
I0216 11:12:25.497078 1 gather_workloads_info.go:257] Loaded pods in 0s, will wait 22s for image data
I0216 11:12:25.507013 1 gather_workloads_info.go:366] No image sha256:0f31e990f9ca9d15dcb1b25325c8265515fcc06381909349bb021103827585c6 (10ms)
I0216 11:12:25.514151 1 tasks_processing.go:74] worker 0 stopped.
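[Editor's note] The tasks_processing.go lines above and below ("number of workers: N", "worker N listening for tasks.", "worker N working on X task.", "worker N stopped.") describe a small worker pool draining a queue of gathering tasks. The following is a generic sketch of that pattern in Go, assembled only from what the log lines suggest; it is not the operator's actual implementation, and the task names are simply the ones that appear in the log.

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        tasks := make(chan string)
        var wg sync.WaitGroup

        const workers = 2
        fmt.Println("number of workers:", workers)

        for i := 0; i < workers; i++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                fmt.Printf("worker %d listening for tasks.\n", id)
                for task := range tasks {
                    fmt.Printf("worker %d working on %s task.\n", id, task)
                    // ... run the gathering function for this task ...
                }
                fmt.Printf("worker %d stopped.\n", id)
            }(i)
        }

        for _, t := range []string{"workload_info", "helmchart_info"} {
            tasks <- t
        }
        close(tasks)
        wg.Wait()
    }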
I0216 11:12:25.514165 1 gather.go:180] gatherer "workloads" function "helmchart_info" took 41.937467ms to process 0 records
I0216 11:12:25.523657 1 gather_workloads_info.go:366] No image sha256:0d1d37dbdb726e924b519ef27e52e9719601fab838ae75f72c8aca11e8c3b4cc (17ms)
I0216 11:12:25.541221 1 gather_workloads_info.go:366] No image sha256:b34e84d56775e42b7d832d14c4f9dc302fee37cd81ba221397cd8acba2089d20 (18ms)
I0216 11:12:25.553623 1 gather_workloads_info.go:366] No image sha256:59f553035bc347fc7205f1c071897bc2606b98525d6b9a3aca62fc9cd7078a57 (12ms)
I0216 11:12:25.565557 1 gather_workloads_info.go:366] No image sha256:457372d9f22e1c726ea1a6fcc54ddca8335bd607d2c357bcd7b63a7017aa5c2b (12ms)
I0216 11:12:25.575961 1 gather_workloads_info.go:366] No image sha256:822db36f8e1353ac24785b88d1fb2150d3ef34a5e739c1f67b61079336e9798b (10ms)
I0216 11:12:25.587178 1 gather_workloads_info.go:366] No image sha256:712ad2760c350db1e23b9393bdda83149452931dc7b5a5038a3bcdb4663917c0 (11ms)
I0216 11:12:25.597571 1 gather_workloads_info.go:366] No image sha256:64ef34275f7ea992f5a4739cf7a724e55806bfab0c752fc0eccc2f70dfecbaf4 (10ms)
I0216 11:12:25.609024 1 gather_workloads_info.go:366] No image sha256:036e6f9a4609a7499f200032dac2294e4a2d98764464ed17453ef725f2f0264d (11ms)
I0216 11:12:25.621456 1 gather_workloads_info.go:366] No image sha256:91d9cb208e6d0c39a87dfe8276d162c75ff3fcd3b005b3e7b537f65c53475a42 (12ms)
I0216 11:12:25.631215 1 gather_workloads_info.go:366] No image sha256:9cc55a501aaad1adbefdd573e57c2f756a3a6a8723c43052995be6389edf1fa8 (10ms)
I0216 11:12:25.708199 1 gather_workloads_info.go:366] No image sha256:29d1672ef44c59d065737eca330075dd2f6da4ba743153973a739aa9e9d73ad3 (77ms)
I0216 11:12:25.810932 1 gather_workloads_info.go:366] No image sha256:88e6cc2192e682bb9c4ac5aec8e41254696d909c5dc337e720b9ec165a728064 (103ms)
I0216 11:12:25.907409 1 gather_workloads_info.go:366] No image sha256:c822bd444a7bc53b21afb9372ff0a24961b2687073f3563c127cce5803801b04 (96ms)
I0216 11:12:26.006609 1 gather_workloads_info.go:366] No image sha256:2121717e0222b9e8892a44907b461a4f62b3f1e5429a0e2eee802d48d04fff30 (99ms)
I0216 11:12:26.107602 1 gather_workloads_info.go:366] No image sha256:185305b7da4ef5b90a90046f145e8c66bab3a16b12771d2e98bf78104d6a60f2 (101ms)
I0216 11:12:26.207405 1 gather_workloads_info.go:366] No image sha256:2bf8536171476b2d616cf62b4d94d2f1dae34aca6ea6bfdb65e764a8d9675891 (100ms)
I0216 11:12:26.306431 1 gather_workloads_info.go:366] No image sha256:deffb0293fd11f5b40609aa9e80b16b0f90a9480013b2b7f61bd350bbd9b6f07 (99ms)
I0216 11:12:26.406321 1 gather_workloads_info.go:366] No image sha256:5335f64616c3a6c55a9a6dc4bc084b46a4957fb4fc250afc5343e4547ebb3598 (100ms)
I0216 11:12:26.507659 1 gather_workloads_info.go:366] No image sha256:0a62577e0731d4421a54b438b79a29d9b9d651d0086ef41d8579c3bc4ff89da7 (101ms)
I0216 11:12:26.607281 1 gather_workloads_info.go:366] No image sha256:29e41a505a942a77c0d5f954eb302c01921cb0c0d176066fe63f82f3e96e3923 (100ms)
I0216 11:12:26.706792 1 gather_workloads_info.go:366] No image sha256:357821852af925e0c8a19df2f9fceec8d2e49f9d13575b86ecd3fbedce488afa (99ms)
I0216 11:12:26.807275 1 gather_workloads_info.go:366] No image sha256:745f2186738a57bb1b484f68431e77aa2f68a1b8dcb434b1f7a4b429eafdf091 (100ms)
I0216 11:12:26.907499 1 gather_workloads_info.go:366] No image sha256:27e725f1250f6a17da5eba7ada315a244592b5b822d61e95722bb7e2f884b00f (100ms)
I0216 11:12:27.006678 1 gather_workloads_info.go:366] No image sha256:2193d7361704b0ae4bca052e9158761e06ecbac9ca3f0a9c8f0f101127e8f370 (99ms)
I0216 11:12:27.107435 1 gather_workloads_info.go:366] No image sha256:33d7e5c63340e93b5a063de538017ac693f154e3c27ee2ef8a8a53bb45583552 (101ms)
I0216 11:12:27.206971 1 gather_workloads_info.go:366] No image sha256:f82357030795138d2081ecc5172092222b0f4faea27e9a7a0474fbeae29111ad (100ms)
I0216 11:12:27.307513 1 gather_workloads_info.go:366] No image sha256:f550296753e9898c67d563b7deb16ba540ca1367944c905415f35537b6b949d4 (101ms)
I0216 11:12:27.406598 1 gather_workloads_info.go:366] No image sha256:586e9c2756f50e562a6123f47fe38dba5496b63413c3dd18e0b85d6167094f0c (99ms)
I0216 11:12:27.506557 1 gather_workloads_info.go:366] No image sha256:7f55b7dbfb15fe36d83d64027eacee22fb00688ccbc03550cc2dbedfa633f288 (100ms)
I0216 11:12:27.606719 1 gather_workloads_info.go:366] No image sha256:43e426ac9df633be58006907aede6f9b6322c6cc7985cd43141ad7518847c637 (100ms)
I0216 11:12:27.707421 1 gather_workloads_info.go:366] No image sha256:79449e16b1207223f1209d19888b879eb56a8202c53df4800e09b231392cf219 (101ms)
I0216 11:12:27.707452 1 tasks_processing.go:74] worker 1 stopped.
I0216 11:12:27.707661 1 recorder.go:75] Recording config/workload_info with fingerprint=75f5fd8cfe7452cc09f6434ea964c8426354098151db81bba8cc5c2be78d1ef7
I0216 11:12:27.707678 1 gather.go:180] gatherer "workloads" function "workload_info" took 2.23532746s to process 1 records
I0216 11:12:27.707692 1 periodic.go:261] Periodic gather workloads completed in 2.235s
I0216 11:12:27.707703 1 controllerstatus.go:80] name=periodic-workloads healthy=true reason= message=
I0216 11:12:27.707709 1 periodic.go:214] Running conditional gatherer
I0216 11:12:27.713776 1 requests.go:282] Making HTTP GET request at: https://console.redhat.com/api/gathering/v2/4.17.48/gathering_rules
I0216 11:12:27.718610 1 conditional_gatherer.go:107] Get "https://console.redhat.com/api/gathering/v2/4.17.48/gathering_rules": dial tcp: lookup console.redhat.com on 172.30.0.10:53: read udp 10.129.0.10:50723->172.30.0.10:53: read: connection refused
E0216 11:12:27.718842 1 conditional_gatherer.go:324] unable to update alerts cache: open /var/run/configmaps/service-ca-bundle/service-ca.crt: no such file or directory
I0216 11:12:27.718897 1 conditional_gatherer.go:386] updating version cache for conditional gatherer
I0216 11:12:27.725223 1 conditional_gatherer.go:394] cluster version is '4.17.48'
E0216 11:12:27.725235 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725241 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725245 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725248 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725252 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725255 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725262 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725266 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
E0216 11:12:27.725269 1 conditional_gatherer.go:211] error checking conditions for a gathering rule: alerts cache is missing
I0216 11:12:27.725284 1 tasks_processing.go:45] number of workers: 3
I0216 11:12:27.725299 1 tasks_processing.go:69] worker 2 listening for tasks.
I0216 11:12:27.725304 1 tasks_processing.go:71] worker 2 working on conditional_gatherer_rules task.
I0216 11:12:27.725313 1 tasks_processing.go:69] worker 0 listening for tasks.
I0216 11:12:27.725327 1 tasks_processing.go:71] worker 0 working on remote_configuration task.
I0216 11:12:27.725326 1 tasks_processing.go:69] worker 1 listening for tasks.
I0216 11:12:27.725335 1 tasks_processing.go:74] worker 1 stopped.
I0216 11:12:27.725342 1 tasks_processing.go:71] worker 2 working on rapid_container_logs task.
I0216 11:12:27.725381 1 recorder.go:75] Recording insights-operator/conditional-gatherer-rules with fingerprint=7034af97e7e41c22e4b775abdd4b9066c8ebb19da33eb7f69f39bfd2eb5f6406
I0216 11:12:27.725395 1 gather.go:180] gatherer "conditional" function "conditional_gatherer_rules" took 995ns to process 1 records
I0216 11:12:27.725423 1 recorder.go:75] Recording insights-operator/remote-configuration with fingerprint=0394430c431eec4d48bb1811a90918e95161d2282c59af26f2473613cc0959db
I0216 11:12:27.725433 1 gather.go:180] gatherer "conditional" function "remote_configuration" took 954ns to process 1 records
I0216 11:12:27.725440 1 tasks_processing.go:74] worker 0 stopped.
I0216 11:12:27.725491 1 tasks_processing.go:74] worker 2 stopped.
I0216 11:12:27.725502 1 gather.go:180] gatherer "conditional" function "rapid_container_logs" took 136.648µs to process 0 records
I0216 11:12:27.725521 1 controllerstatus.go:89] name=periodic-conditional healthy=false reason=NotAvailable message=Get "https://console.redhat.com/api/gathering/v2/4.17.48/gathering_rules": dial tcp: lookup console.redhat.com on 172.30.0.10:53: read udp 10.129.0.10:50723->172.30.0.10:53: read: connection refused
I0216 11:12:27.725532 1 recorder.go:75] Recording insights-operator/remote-configuration.json with fingerprint=359de9c990c741675cec72fda96b5c3682221efdb4799f5eaa6e9805bcd3b5c1
W0216 11:12:27.749989 1 gather.go:212] can't read cgroups memory usage data: open /sys/fs/cgroup/memory/memory.usage_in_bytes: no such file or directory
I0216 11:12:27.750116 1 recorder.go:75] Recording insights-operator/gathers with fingerprint=6ab3c8fd237202c6b042a97210c79c8c3f5c2231d442155a9b4fa500de0c4436
I0216 11:12:27.750255 1 diskrecorder.go:70] Writing 105 records to /var/lib/insights-operator/insights-2026-02-16-111227.tar.gz
I0216 11:12:27.755934 1 diskrecorder.go:51] Wrote 105 records to disk in 5ms
I0216 11:12:27.755960 1 periodic.go:283] Gathering cluster info every 2h0m0s
I0216 11:12:27.755981 1 periodic.go:284] Configuration is dataReporting: interval: 2h0m0s, uploadEndpoint: https://console.redhat.com/api/ingress/v1/upload, storagePath: /var/lib/insights-operator, downloadEndpoint: https://console.redhat.com/api/insights-results-aggregator/v2/cluster/%s/reports, conditionalGathererEndpoint: https://console.redhat.com/api/gathering/v2/%s/gathering_rules, obfuscation: [] sca: disabled: false, endpoint: https://api.openshift.com/api/accounts_mgmt/v1/certificates, interval: 8h0m0s alerting: disabled: false clusterTransfer: endpoint: https://api.openshift.com/api/accounts_mgmt/v1/cluster_transfers/, interval: 12h0m0s proxy: httpProxy: , httpsProxy: , noProxy:
I0216 11:12:33.781336 1 configmapobserver.go:84] configmaps "insights-config" not found
I0216 11:13:41.650624 1 observer_polling.go:111] Observed file "/var/run/secrets/serving-cert/tls.crt" has been created (hash="cffbc25fbc47aa117d89ca2eb65a1843534e8ecd12035212027ff5e25441f129")
W0216 11:13:41.650654 1 builder.go:155] Restart triggered because of file /var/run/secrets/serving-cert/tls.crt was created
I0216 11:13:41.650683 1 observer_polling.go:111] Observed file "/var/run/secrets/serving-cert/tls.key" has been created (hash="beaa7ed8832908e16feb18c72e3e6b832ec54274d2a1776da0086e88a91a85a7")
I0216 11:13:41.650734 1 observer_polling.go:111] Observed file "/var/run/configmaps/service-ca-bundle/service-ca.crt" has been created (hash="d3d39066fa541a01abe6e2a491e5ec627f1f6229317d7e25bfde138f4e3a8e30")
I0216 11:13:41.650762 1 simple_featuregate_reader.go:177] Shutting down feature-gate-detector
I0216 11:13:41.650793 1 base_controller.go:172] Shutting down LoggingSyncer ...
I0216 11:13:41.650808 1 periodic.go:175] Shutting down
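[Editor's note] The closing entries show the polling file observer noticing that the serving certificate, key, and service CA bundle were finally created, logging their content hashes, and triggering a controlled shutdown so the restarted process can pick them up. The sketch below is a simplified, hedged illustration of hash-based file polling of that kind; the poll interval and structure are assumptions, only the path comes from the log, and the real observer performs a graceful shutdown rather than a plain exit.

    package main

    import (
        "crypto/sha256"
        "fmt"
        "os"
        "time"
    )

    func main() {
        path := "/var/run/secrets/serving-cert/tls.crt"

        // Poll until the file appears, then report its content hash and
        // trigger a restart so the new certificate is picked up.
        for range time.Tick(5 * time.Second) {
            data, err := os.ReadFile(path)
            if err != nil {
                continue // not created yet; keep polling
            }
            hash := fmt.Sprintf("%x", sha256.Sum256(data))
            fmt.Printf("Observed file %q has been created (hash=%q)\n", path, hash)
            fmt.Printf("Restart triggered because of file %s was created\n", path)
            os.Exit(0)
        }
    }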