I0213 18:02:11.502297 1 serving.go:386] Generated self-signed cert in-memory
I0213 18:02:11.966980 1 controllermanager.go:185] "Starting" version="v1.32.5"
I0213 18:02:11.967000 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0213 18:02:11.968246 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/pki/front-proxy-ca.crt"
I0213 18:02:11.968249 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/pki/ca.crt"
I0213 18:02:11.968437 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257
I0213 18:02:11.968504 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I0213 18:02:11.968622 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager...
E0213 18:02:13.155856 1 leaderelection.go:436] error retrieving resource lock kube-system/kube-controller-manager: leases.coordination.k8s.io "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-system"
I0213 18:02:16.784956 1 leaderelection.go:271] successfully acquired lease kube-system/kube-controller-manager
I0213 18:02:16.785156 1 event.go:389] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="kind-mapt-control-plane_362affc1-45b7-4643-ad10-b231b2c38aa0 became leader"
I0213 18:02:16.787163 1 controllermanager.go:765] "Started controller" controller="serviceaccount-token-controller"
I0213 18:02:16.787188 1 shared_informer.go:313] Waiting for caches to sync for tokens
I0213 18:02:16.800608 1 controllermanager.go:765] "Started controller" controller="serviceaccount-controller"
I0213 18:02:16.800688 1 serviceaccounts_controller.go:114] "Starting service account controller" logger="serviceaccount-controller"
I0213 18:02:16.800710 1 shared_informer.go:313] Waiting for caches to sync for service account
I0213 18:02:16.807628 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-cleaner-controller"
I0213 18:02:16.807657 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="node-route-controller"
I0213 18:02:16.807708 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller"
I0213 18:02:16.816297 1 controllermanager.go:765] "Started controller" controller="persistentvolume-expander-controller"
I0213 18:02:16.816387 1 expand_controller.go:329] "Starting expand controller" logger="persistentvolume-expander-controller"
I0213 18:02:16.816400 1 shared_informer.go:313] Waiting for caches to sync for expand
I0213 18:02:16.825128 1 controllermanager.go:765] "Started controller" controller="ttl-after-finished-controller"
I0213 18:02:16.825239 1 ttlafterfinished_controller.go:112] "Starting TTL after finished controller" logger="ttl-after-finished-controller"
I0213 18:02:16.825253 1 shared_informer.go:313] Waiting for caches to sync for TTL after finished
I0213 18:02:16.832258 1 controllermanager.go:765] "Started controller" controller="endpoints-controller"
I0213 18:02:16.832322 1 endpoints_controller.go:182] "Starting endpoint controller" logger="endpoints-controller"
I0213 18:02:16.832338 1 shared_informer.go:313] Waiting for caches to sync for endpoint
I0213 18:02:16.839118 1 controllermanager.go:765] "Started controller" controller="endpointslice-mirroring-controller"
I0213 18:02:16.839253 1 endpointslicemirroring_controller.go:227] "Starting EndpointSliceMirroring controller" logger="endpointslice-mirroring-controller"
I0213 18:02:16.839266 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice_mirroring
I0213 18:02:16.857997 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling"
I0213 18:02:16.858056 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy"
I0213 18:02:16.858117 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io"
I0213 18:02:16.858164 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps"
I0213 18:02:16.858198 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps"
I0213 18:02:16.858245 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="statefulsets.apps"
I0213 18:02:16.858271 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch"
I0213 18:02:16.858298 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io"
I0213 18:02:16.858326 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints"
I0213 18:02:16.858360 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch"
I0213 18:02:16.858382 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io"
I0213 18:02:16.858418 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="daemonsets.apps"
I0213 18:02:16.858439 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps"
I0213 18:02:16.858461 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io"
I0213 18:02:16.858487 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io"
I0213 18:02:16.858527 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges"
W0213 18:02:16.858540 1 shared_informer.go:597] resyncPeriod 13h23m20.233476912s is smaller than resyncCheckPeriod 18h27m16.385369852s and the informer has already started. Changing it to 18h27m16.385369852s
I0213 18:02:16.858578 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts"
I0213 18:02:16.858602 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io"
I0213 18:02:16.858623 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io"
W0213 18:02:16.858632 1 shared_informer.go:597] resyncPeriod 15h29m36.139895169s is smaller than resyncCheckPeriod 18h27m16.385369852s and the informer has already started. Changing it to 18h27m16.385369852s
I0213 18:02:16.858704 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates"
I0213 18:02:16.858724 1 controllermanager.go:765] "Started controller" controller="resourcequota-controller"
I0213 18:02:16.858733 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"]
I0213 18:02:16.858748 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller"
I0213 18:02:16.858839 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0213 18:02:16.858866 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller"
I0213 18:02:16.860420 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-approving-controller"
I0213 18:02:16.860516 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-approving-controller" name="csrapproving"
I0213 18:02:16.860575 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrapproving
I0213 18:02:16.867913 1 controllermanager.go:765] "Started controller" controller="persistentvolume-binder-controller"
I0213 18:02:16.868037 1 pv_controller_base.go:308] "Starting persistent volume controller" logger="persistentvolume-binder-controller"
I0213 18:02:16.868051 1 shared_informer.go:313] Waiting for caches to sync for persistent volume
I0213 18:02:16.888274 1 shared_informer.go:320] Caches are synced for tokens
I0213 18:02:16.891324 1 controllermanager.go:765] "Started controller" controller="persistentvolumeclaim-protection-controller"
I0213 18:02:16.891362 1 pvc_protection_controller.go:168] "Starting PVC protection controller" logger="persistentvolumeclaim-protection-controller"
I0213 18:02:16.891371 1 shared_informer.go:313] Waiting for caches to sync for PVC protection
I0213 18:02:17.041713 1 controllermanager.go:765] "Started controller" controller="ephemeral-volume-controller"
I0213 18:02:17.041731 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="selinux-warning-controller" requiredFeatureGates=["SELinuxChangePolicy"]
I0213 18:02:17.041779 1 controller.go:173] "Starting ephemeral volume controller" logger="ephemeral-volume-controller"
I0213 18:02:17.041788 1 shared_informer.go:313] Waiting for caches to sync for ephemeral
I0213 18:02:17.193466 1 controllermanager.go:765] "Started controller" controller="job-controller"
I0213 18:02:17.193533 1 job_controller.go:243] "Starting job controller" logger="job-controller"
I0213 18:02:17.193545 1 shared_informer.go:313] Waiting for caches to sync for job
I0213 18:02:17.491234 1 controllermanager.go:765] "Started controller" controller="horizontal-pod-autoscaler-controller"
I0213 18:02:17.491284 1 horizontal.go:201] "Starting HPA controller" logger="horizontal-pod-autoscaler-controller"
I0213 18:02:17.491294 1 shared_informer.go:313] Waiting for caches to sync for HPA
I0213 18:02:17.641601 1 controllermanager.go:765] "Started controller" controller="statefulset-controller"
I0213 18:02:17.641716 1 stateful_set.go:166] "Starting stateful set controller" logger="statefulset-controller"
I0213 18:02:17.641732 1 shared_informer.go:313] Waiting for caches to sync for stateful set
I0213 18:02:17.891599 1 range_allocator.go:112] "No Secondary Service CIDR provided. Skipping filtering out secondary service addresses" logger="node-ipam-controller"
I0213 18:02:17.891651 1 controllermanager.go:765] "Started controller" controller="node-ipam-controller"
I0213 18:02:17.891661 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="volumeattributesclass-protection-controller" requiredFeatureGates=["VolumeAttributesClass"]
I0213 18:02:17.891765 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller"
I0213 18:02:17.891779 1 shared_informer.go:313] Waiting for caches to sync for node
I0213 18:02:17.938540 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving"
I0213 18:02:17.938568 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-serving
I0213 18:02:17.938590 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0213 18:02:17.938848 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client"
I0213 18:02:17.938864 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-client
I0213 18:02:17.938884 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0213 18:02:17.939142 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client"
I0213 18:02:17.939156 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kube-apiserver-client
I0213 18:02:17.939177 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0213 18:02:17.939347 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-signing-controller"
I0213 18:02:17.939410 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown"
I0213 18:02:17.939428 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-legacy-unknown
I0213 18:02:17.939447 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0213 18:02:18.092544 1 controllermanager.go:765] "Started controller" controller="ttl-controller"
I0213 18:02:18.092598 1 ttl_controller.go:127] "Starting TTL controller" logger="ttl-controller"
I0213 18:02:18.092612 1 shared_informer.go:313] Waiting for caches to sync for TTL
I0213 18:02:18.138064 1 node_lifecycle_controller.go:432] "Controller will reconcile labels" logger="node-lifecycle-controller"
I0213 18:02:18.138107 1 controllermanager.go:765] "Started controller" controller="node-lifecycle-controller"
I0213 18:02:18.138191 1 node_lifecycle_controller.go:466] "Sending events to api server" logger="node-lifecycle-controller"
I0213 18:02:18.138232 1 node_lifecycle_controller.go:477] "Starting node controller" logger="node-lifecycle-controller"
I0213 18:02:18.138241 1 shared_informer.go:313] Waiting for caches to sync for taint
I0213 18:02:18.291662 1 controllermanager.go:765] "Started controller" controller="legacy-serviceaccount-token-cleaner-controller"
I0213 18:02:18.291684 1 controllermanager.go:743] "Warning: skipping controller" controller="storage-version-migrator-controller"
I0213 18:02:18.291719 1 legacy_serviceaccount_token_cleaner.go:103] "Starting legacy service account token cleaner controller" logger="legacy-serviceaccount-token-cleaner-controller"
I0213 18:02:18.291730 1 shared_informer.go:313] Waiting for caches to sync for legacy-service-account-token-cleaner
I0213 18:02:18.537793 1 controllermanager.go:765] "Started controller" controller="garbage-collector-controller"
I0213 18:02:18.537836 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector"
I0213 18:02:18.537863 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0213 18:02:18.537888 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder"
I0213 18:02:18.791535 1 controllermanager.go:765] "Started controller" controller="daemonset-controller"
I0213 18:02:18.791684 1 daemon_controller.go:294] "Starting daemon sets controller" logger="daemonset-controller"
I0213 18:02:18.791696 1 shared_informer.go:313] Waiting for caches to sync for daemon sets
I0213 18:02:18.942780 1 controllermanager.go:765] "Started controller" controller="cronjob-controller"
I0213 18:02:18.942794 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="service-lb-controller"
I0213 18:02:18.942891 1 cronjob_controllerv2.go:145] "Starting cronjob controller v2" logger="cronjob-controller"
I0213 18:02:18.942905 1 shared_informer.go:313] Waiting for caches to sync for cronjob
I0213 18:02:19.091614 1 controllermanager.go:765] "Started controller" controller="persistentvolume-attach-detach-controller"
I0213 18:02:19.091814 1 attach_detach_controller.go:338] "Starting attach detach controller" logger="persistentvolume-attach-detach-controller"
I0213 18:02:19.091827 1 shared_informer.go:313] Waiting for caches to sync for attach detach
I0213 18:02:19.240925 1 controllermanager.go:765] "Started controller" controller="clusterrole-aggregation-controller"
I0213 18:02:19.240941 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"]
I0213 18:02:19.240961 1 clusterroleaggregation_controller.go:194] "Starting ClusterRoleAggregator controller" logger="clusterrole-aggregation-controller"
I0213 18:02:19.240976 1 shared_informer.go:313] Waiting for caches to sync for ClusterRoleAggregator
I0213 18:02:19.287314 1 controllermanager.go:765] "Started controller" controller="taint-eviction-controller"
I0213 18:02:19.287365 1 taint_eviction.go:281] "Starting" logger="taint-eviction-controller" controller="taint-eviction-controller"
I0213 18:02:19.287390 1 taint_eviction.go:287] "Sending events to api server" logger="taint-eviction-controller"
I0213 18:02:19.287409 1 shared_informer.go:313] Waiting for caches to sync for taint-eviction-controller
I0213 18:02:19.441541 1 controllermanager.go:765] "Started controller" controller="replicationcontroller-controller"
I0213 18:02:19.441602 1 replica_set.go:217] "Starting controller" logger="replicationcontroller-controller" name="replicationcontroller"
I0213 18:02:19.441613 1 shared_informer.go:313] Waiting for caches to sync for ReplicationController
I0213 18:02:19.637210 1 controllermanager.go:765] "Started controller" controller="disruption-controller"
I0213 18:02:19.637273 1 disruption.go:452] "Sending events to api server." logger="disruption-controller"
I0213 18:02:19.637306 1 disruption.go:463] "Starting disruption controller" logger="disruption-controller"
I0213 18:02:19.637318 1 shared_informer.go:313] Waiting for caches to sync for disruption
I0213 18:02:19.791028 1 controllermanager.go:765] "Started controller" controller="bootstrap-signer-controller"
I0213 18:02:19.791044 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="service-cidr-controller" requiredFeatureGates=["MultiCIDRServiceAllocator"]
I0213 18:02:19.791086 1 shared_informer.go:313] Waiting for caches to sync for bootstrap_signer
I0213 18:02:19.941190 1 controllermanager.go:765] "Started controller" controller="endpointslice-controller"
I0213 18:02:19.941792 1 endpointslice_controller.go:281] "Starting endpoint slice controller" logger="endpointslice-controller"
I0213 18:02:19.941823 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice
I0213 18:02:20.090961 1 controllermanager.go:765] "Started controller" controller="pod-garbage-collector-controller"
I0213 18:02:20.090976 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="cloud-node-lifecycle-controller"
I0213 18:02:20.090982 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="resourceclaim-controller" requiredFeatureGates=["DynamicResourceAllocation"]
I0213 18:02:20.091010 1 gc_controller.go:99] "Starting GC controller" logger="pod-garbage-collector-controller"
I0213 18:02:20.091027 1 shared_informer.go:313] Waiting for caches to sync for GC
I0213 18:02:20.287450 1 controllermanager.go:765] "Started controller" controller="validatingadmissionpolicy-status-controller"
I0213 18:02:20.287500 1 shared_informer.go:313] Waiting for caches to sync for validatingadmissionpolicy-status
I0213 18:02:20.541723 1 controllermanager.go:765] "Started controller" controller="namespace-controller"
I0213 18:02:20.541745 1 namespace_controller.go:202] "Starting namespace controller" logger="namespace-controller"
I0213 18:02:20.541759 1 shared_informer.go:313] Waiting for caches to sync for namespace
I0213 18:02:20.695629 1 controllermanager.go:765] "Started controller" controller="replicaset-controller"
I0213 18:02:20.695725 1 replica_set.go:217] "Starting controller" logger="replicaset-controller" name="replicaset"
I0213 18:02:20.695742 1 shared_informer.go:313] Waiting for caches to sync for ReplicaSet
I0213 18:02:20.842026 1 controllermanager.go:765] "Started controller" controller="token-cleaner-controller"
I0213 18:02:20.842074 1 tokencleaner.go:117] "Starting token cleaner controller" logger="token-cleaner-controller"
I0213 18:02:20.842082 1 shared_informer.go:313] Waiting for caches to sync for token_cleaner
I0213 18:02:20.842089 1 shared_informer.go:320] Caches are synced for token_cleaner
I0213 18:02:20.992743 1 controllermanager.go:765] "Started controller" controller="deployment-controller"
I0213 18:02:20.992850 1 deployment_controller.go:173] "Starting controller" logger="deployment-controller" controller="deployment"
I0213 18:02:20.992866 1 shared_informer.go:313] Waiting for caches to sync for deployment
I0213 18:02:21.141155 1 controllermanager.go:765] "Started controller" controller="persistentvolume-protection-controller"
I0213 18:02:21.141228 1 pv_protection_controller.go:81] "Starting PV protection controller" logger="persistentvolume-protection-controller"
I0213 18:02:21.141245 1 shared_informer.go:313] Waiting for caches to sync for PV protection
I0213 18:02:21.291103 1 controllermanager.go:765] "Started controller" controller="root-ca-certificate-publisher-controller"
I0213 18:02:21.291229 1 publisher.go:107] "Starting root CA cert publisher controller" logger="root-ca-certificate-publisher-controller"
I0213 18:02:21.291246 1 shared_informer.go:313] Waiting for caches to sync for crt configmap
I0213 18:02:21.295187 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0213 18:02:21.300055 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"kind-mapt-control-plane\" does not exist"
I0213 18:02:21.300776 1 shared_informer.go:320] Caches are synced for service account
I0213 18:02:21.302438 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0213 18:02:21.317145 1 shared_informer.go:320] Caches are synced for expand
I0213 18:02:21.325302 1 shared_informer.go:320] Caches are synced for TTL after finished
I0213 18:02:21.332539 1 shared_informer.go:320] Caches are synced for endpoint
I0213 18:02:21.337395 1 shared_informer.go:320] Caches are synced for disruption
I0213 18:02:21.338442 1 shared_informer.go:320] Caches are synced for taint
I0213 18:02:21.338449 1 shared_informer.go:320] Caches are synced for garbage collector
I0213 18:02:21.338460 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0213 18:02:21.338476 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I0213 18:02:21.338546 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I0213 18:02:21.338654 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="kind-mapt-control-plane"
I0213 18:02:21.338658 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-serving
I0213 18:02:21.338696 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I0213 18:02:21.338897 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-client
I0213 18:02:21.339851 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0213 18:02:21.339857 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-legacy-unknown
I0213 18:02:21.339940 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring
I0213 18:02:21.341072 1 shared_informer.go:320] Caches are synced for ClusterRoleAggregator
I0213 18:02:21.341286 1 shared_informer.go:320] Caches are synced for PV protection
I0213 18:02:21.342264 1 shared_informer.go:320] Caches are synced for ReplicationController
I0213 18:02:21.342280 1 shared_informer.go:320] Caches are synced for endpoint_slice
I0213 18:02:21.342316 1 shared_informer.go:320] Caches are synced for ephemeral
I0213 18:02:21.342321 1 shared_informer.go:320] Caches are synced for stateful set
I0213 18:02:21.342325 1 shared_informer.go:320] Caches are synced for namespace
I0213 18:02:21.343548 1 shared_informer.go:320] Caches are synced for cronjob
I0213 18:02:21.359814 1 shared_informer.go:320] Caches are synced for resource quota
I0213 18:02:21.360870 1 shared_informer.go:320] Caches are synced for certificate-csrapproving
I0213 18:02:21.368095 1 shared_informer.go:320] Caches are synced for persistent volume
I0213 18:02:21.387761 1 shared_informer.go:320] Caches are synced for taint-eviction-controller
I0213 18:02:21.387789 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status
I0213 18:02:21.391975 1 shared_informer.go:320] Caches are synced for attach detach
I0213 18:02:21.391989 1 shared_informer.go:320] Caches are synced for GC
I0213 18:02:21.392000 1 shared_informer.go:320] Caches are synced for HPA
I0213 18:02:21.392015 1 shared_informer.go:320] Caches are synced for crt configmap
I0213 18:02:21.392030 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner
I0213 18:02:21.392045 1 shared_informer.go:320] Caches are synced for node
I0213 18:02:21.392052 1 shared_informer.go:320] Caches are synced for bootstrap_signer
I0213 18:02:21.392063 1 shared_informer.go:320] Caches are synced for daemon sets
I0213 18:02:21.392052 1 shared_informer.go:320] Caches are synced for PVC protection
I0213 18:02:21.392093 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0213 18:02:21.392129 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0213 18:02:21.392135 1 shared_informer.go:313] Waiting for caches to sync for cidrallocator
I0213 18:02:21.392140 1 shared_informer.go:320] Caches are synced for cidrallocator
I0213 18:02:21.393453 1 shared_informer.go:320] Caches are synced for deployment
I0213 18:02:21.393580 1 shared_informer.go:320] Caches are synced for job
I0213 18:02:21.393581 1 shared_informer.go:320] Caches are synced for TTL
I0213 18:02:21.395836 1 shared_informer.go:320] Caches are synced for ReplicaSet
I0213 18:02:21.395868 1 shared_informer.go:320] Caches are synced for resource quota
I0213 18:02:21.399248 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="kind-mapt-control-plane" podCIDRs=["10.244.0.0/24"]
I0213 18:02:21.399276 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:21.399302 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:21.403206 1 shared_informer.go:320] Caches are synced for garbage collector
I0213 18:02:22.394540 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:22.503752 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="204.924836ms"
I0213 18:02:22.507316 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="211.139817ms"
I0213 18:02:22.510520 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="6.731406ms"
I0213 18:02:22.510603 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="56.677µs"
I0213 18:02:22.513681 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="6.333608ms"
I0213 18:02:22.513741 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="36.483µs"
I0213 18:02:22.516703 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="64.58µs"
I0213 18:02:22.523001 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="24.038µs"
I0213 18:02:26.029436 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:34.874357 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:34.882154 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:34.887890 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="43.664µs"
I0213 18:02:34.891436 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="77.311µs"
I0213 18:02:34.891533 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="38.042µs"
I0213 18:02:34.907623 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="38.294µs"
I0213 18:02:34.917310 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="61.061µs"
I0213 18:02:34.925701 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="63.175µs"
I0213 18:02:36.238367 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:02:36.341130 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
I0213 18:02:42.110388 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="6.225677ms"
I0213 18:02:42.110454 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="34.247µs"
I0213 18:02:42.131114 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="8.965105ms"
I0213 18:02:42.131204 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="59.759µs"
I0213 18:02:42.140616 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="6.481808ms"
I0213 18:02:42.140683 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="47.116µs"
I0213 18:02:46.450969 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:03:10.455215 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="test-pvc-ns"
I0213 18:03:14.361515 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="19.320313ms"
I0213 18:03:14.373915 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="12.357674ms"
I0213 18:03:14.373987 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="44.48µs"
I0213 18:03:14.374028 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="27.029µs"
I0213 18:03:14.551627 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="27.483089ms"
I0213 18:03:14.557780 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="6.100253ms"
I0213 18:03:14.557851 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="44.587µs"
I0213 18:03:14.561023 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="36.817µs"
I0213 18:03:14.570766 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="52.075µs"
I0213 18:03:14.730085 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="22.191794ms"
I0213 18:03:14.741016 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="10.896434ms"
I0213 18:03:14.741100 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="54.182µs"
I0213 18:03:14.745354 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="53.4µs"
I0213 18:03:14.888762 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s"
I0213 18:03:14.899060 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:14.907160 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:14.907192 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:14.919854 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:16.951480 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:03:18.187490 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="9.719276ms"
I0213 18:03:18.187565 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="49.102µs"
I0213 18:03:20.190391 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="8.755128ms"
I0213 18:03:20.190461 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="41.752µs"
I0213 18:03:21.402845 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificates.cert-manager.io"
I0213 18:03:21.402889 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="orders.acme.cert-manager.io"
I0213 18:03:21.402907 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="challenges.acme.cert-manager.io"
I0213 18:03:21.402925 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificaterequests.cert-manager.io"
I0213 18:03:21.402942 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="issuers.cert-manager.io"
I0213 18:03:21.402991 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0213 18:03:21.412044 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0213 18:03:21.503959 1 shared_informer.go:320] Caches are synced for resource quota
I0213 18:03:21.513113 1 shared_informer.go:320] Caches are synced for garbage collector
I0213 18:03:22.188588 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="66.26µs"
I0213 18:03:23.190218 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:24.198166 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:27.301027 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:03:28.200942 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="8.892631ms"
I0213 18:03:28.201018 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="52.028µs"
I0213 18:03:31.034219 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="17.782866ms"
I0213 18:03:31.041603 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="7.347466ms"
I0213 18:03:31.041717 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="85.021µs"
I0213 18:03:31.048254 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="40.395µs"
I0213 18:03:34.208480 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:35.212625 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="51.722µs"
I0213 18:03:35.213990 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:35.307723 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:36.220225 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:36.226044 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s"
I0213 18:03:38.223095 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="51.711µs"
I0213 18:03:46.237111 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="8.897681ms"
I0213 18:03:46.237189 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="52.164µs"
I0213 18:03:47.927476 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:03:51.519009 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0213 18:03:52.102463 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="20.442595ms"
I0213 18:03:52.118002 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="15.508154ms"
I0213 18:03:52.118065 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="34.674µs"
I0213 18:03:52.279873 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="16.736379ms"
I0213 18:03:52.287853 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="7.949191ms"
I0213 18:03:52.287907 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="32.923µs"
I0213 18:03:52.291406 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="29.569µs"
I0213 18:03:52.620078 1 shared_informer.go:320] Caches are synced for garbage collector
I0213 18:03:56.274674 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="8.63679ms"
I0213 18:03:56.274752 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="46.027µs"
I0213 18:03:58.283342 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="8.465602ms"
I0213 18:03:58.283409 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="35.827µs"
I0213 18:04:18.682506 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:04:20.338578 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="19.989633ms"
I0213 18:04:20.346586 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="7.971956ms"
I0213 18:04:20.346709 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="77.696µs"
I0213 18:04:20.350748 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="63.574µs"
I0213 18:04:20.472029 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="17.184779ms"
I0213 18:04:20.480104 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="8.042404ms"
I0213 18:04:20.480189 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="53.315µs"
I0213 18:04:20.484353 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="48.926µs"
I0213 18:04:20.576930 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="16.989685ms"
I0213 18:04:20.596291 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="19.32316ms"
I0213 18:04:20.606580 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="10.252836ms"
I0213 18:04:20.606673 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="62.846µs"
I0213 18:04:20.679114 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="18.325152ms"
I0213 18:04:20.686878 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="7.730158ms"
I0213 18:04:20.686981 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="64.241µs"
I0213 18:04:20.693560 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="62.331µs"
I0213 18:04:21.086957 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="19.017726ms"
I0213 18:04:21.095315 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="8.313546ms"
I0213 18:04:21.095388 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="41.8µs"
I0213 18:04:21.098602 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="34.479µs"
I0213 18:04:21.512749 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="customruns.tekton.dev"
I0213 18:04:21.512783 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="verificationpolicies.tekton.dev"
I0213 18:04:21.512804 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelines.tekton.dev"
I0213 18:04:21.512842 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="resolutionrequests.resolution.tekton.dev"
I0213 18:04:21.512875 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="taskruns.tekton.dev"
I0213 18:04:21.512904 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="tasks.tekton.dev"
I0213 18:04:21.512937 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelineruns.tekton.dev"
I0213 18:04:21.512966 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="stepactions.tekton.dev"
I0213 18:04:21.513070 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0213 18:04:22.628482 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0213 18:04:22.713790 1 shared_informer.go:320] Caches are synced for resource quota
I0213 18:04:22.729046 1 shared_informer.go:320] Caches are synced for garbage collector
I0213 18:04:24.321894 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="85.52µs"
I0213 18:04:26.326824 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="91.86µs"
I0213 18:04:29.344761 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="9.045512ms"
I0213 18:04:29.344855 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="64.796µs"
W0213 18:04:30.877850 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:04:30.884057 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0213 18:04:31.182133 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:04:31.190854 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0213 18:04:31.346262 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="63.067µs"
I0213 18:04:33.353521 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="6.554729ms"
I0213 18:04:33.353582 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="33.547µs"
I0213 18:04:35.333282 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="7.913559ms"
I0213 18:04:35.333375 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="56.683µs"
I0213 18:04:37.341347 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="8.881579ms"
I0213 18:04:37.341443 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="68.166µs"
I0213 18:04:42.355909 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="8.512273ms"
I0213 18:04:42.356009 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="62.87µs"
W0213 18:04:45.892141 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:04:45.892850 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0213 18:04:46.193204 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:04:46.193856 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0213 18:04:49.493067 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="16.345549ms"
I0213 18:04:49.504371 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="11.268158ms"
I0213 18:04:49.504454 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="57.214µs"
I0213 18:04:49.511379 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="53.875µs"
I0213 18:04:49.593001 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="16.063346ms"
I0213 18:04:49.597009 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:04:49.600204 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="7.173658ms"
I0213 18:04:49.600311 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="78.083µs"
I0213 18:04:49.613365 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="59.036µs"
I0213 18:04:49.692253 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="15.906609ms"
I0213 18:04:49.700873 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="8.583012ms"
I0213 18:04:49.700942 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="44.675µs"
I0213 18:04:49.707296 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="56.656µs"
I0213 18:04:52.718687 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="eventlisteners.triggers.tekton.dev"
I0213 18:04:52.718718 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggers.triggers.tekton.dev"
I0213 18:04:52.718747 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="interceptors.triggers.tekton.dev"
I0213 18:04:52.718772 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggerbindings.triggers.tekton.dev"
I0213 18:04:52.718789 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggertemplates.triggers.tekton.dev"
I0213 18:04:52.718869 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0213 18:04:52.738815 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0213 18:04:53.403790 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="9.871457ms"
I0213 18:04:53.403919 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="91.481µs"
I0213 18:04:53.819686 1 shared_informer.go:320] Caches are synced for resource quota
I0213 18:04:53.839174 1 shared_informer.go:320] Caches are synced for garbage collector
I0213 18:04:56.409128 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="8.753189ms"
I0213 18:04:56.409198 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="45.261µs"
I0213 18:04:58.405075 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="75.484µs"
I0213 18:04:59.665745 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
W0213 18:05:00.897092 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:05:00.897750 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0213 18:05:01.198185 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:05:01.198800 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0213 18:05:09.418470 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="8.891325ms"
I0213 18:05:09.418552 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="51.272µs"
I0213 18:05:09.789296 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0213 18:05:11.305520 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="23.80666ms"
I0213 18:05:11.314497 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="8.865568ms"
I0213 18:05:11.314571 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="48.541µs"
I0213 18:05:11.320922 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="61.049µs"
W0213 18:05:15.902315 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:05:15.902937 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0213 18:05:16.203199 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:05:16.203840 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number
of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:16.454013 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="9.003185ms" I0213 18:05:16.454118 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="70.868µs" I0213 18:05:18.950854 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="16.465929ms" I0213 18:05:18.959434 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="8.545387ms" I0213 18:05:18.959514 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="56.2µs" I0213 18:05:18.969736 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="59.097µs" I0213 18:05:19.005624 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="16.376293ms" I0213 18:05:19.014182 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="8.490241ms" I0213 18:05:19.014260 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="55.478µs" I0213 18:05:19.028178 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="63.14µs" I0213 18:05:19.175069 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="21.342656ms" I0213 18:05:19.183340 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="8.238087ms" I0213 18:05:19.183425 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="59.229µs" I0213 18:05:19.187105 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="49.275µs" I0213 18:05:19.953090 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:05:23.466450 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="59.019µs" I0213 18:05:26.480462 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="6.537557ms" I0213 18:05:26.480565 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="61.888µs" I0213 18:05:29.489875 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="5.992547ms" I0213 18:05:29.489953 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="46.89µs" W0213 18:05:30.914023 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:30.914700 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:05:31.208323 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:31.209006 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:40.309057 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:05:45.918957 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:45.919611 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:05:46.213041 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:46.213685 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for 
resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:49.272003 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="68.12µs" I0213 18:05:49.290224 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="8.850874ms" I0213 18:05:49.290303 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-78b74b8648" duration="52.46µs" I0213 18:05:50.692829 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:05:53.329906 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="19.404458ms" I0213 18:05:53.338221 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="8.277571ms" I0213 18:05:53.338320 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="64.525µs" I0213 18:05:53.348031 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="50.97µs" I0213 18:05:53.827837 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="extensions.dashboard.tekton.dev" I0213 18:05:53.827891 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:05:53.849867 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:05:54.850810 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:05:54.928931 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:05:56.602323 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="77.381µs" I0213 18:05:56.619920 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="8.184544ms" I0213 18:05:56.620015 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="63.039µs" I0213 18:06:00.729774 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:06:00.924098 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:00.924814 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 
18:06:01.217566 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:01.218252 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:01.446273 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="16.510809ms" I0213 18:06:01.454879 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="8.56976ms" I0213 18:06:01.454993 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="81.18µs" I0213 18:06:01.462125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="63.198µs" E0213 18:06:02.013502 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-edit\", UID:\"814f46fe-4cae-4dea-9db0-df4b6d16f76c\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-9fk9f\", UID:\"a9337de2-4bc8-4dde-8886-bc6cf2dedcc7\", Controller:(*bool)(0xc003482087), BlockOwnerDeletion:(*bool)(0xc003482088)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-edit\" not found" logger="UnhandledError" E0213 18:06:02.038620 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-view\", 
UID:\"2bd3feef-080b-4905-934b-ed5208dec51a\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-9fk9f\", UID:\"a9337de2-4bc8-4dde-8886-bc6cf2dedcc7\", Controller:(*bool)(0xc001cc9867), BlockOwnerDeletion:(*bool)(0xc001cc9868)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-view\" not found" logger="UnhandledError" E0213 18:06:02.188260 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-tenant-view\", UID:\"97d73f77-2e86-4f47-a954-467da5cb5711\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-9fk9f\", UID:\"a9337de2-4bc8-4dde-8886-bc6cf2dedcc7\", Controller:(*bool)(0xc003482137), BlockOwnerDeletion:(*bool)(0xc003482138)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-tenant-view\" not found" logger="UnhandledError" E0213 18:06:02.525169 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:06:02.627733 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="9.295239ms" I0213 18:06:02.627820 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="56.031µs" I0213 18:06:02.655571 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="18.746256ms" I0213 18:06:02.665267 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="9.665429ms" I0213 18:06:02.665471 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="83.998µs" I0213 18:06:03.008078 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="89.472µs" I0213 18:06:03.227862 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="6.517245ms" I0213 18:06:03.227968 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="53.708µs" I0213 18:06:03.427078 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="9.438808ms" I0213 18:06:03.427195 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="71.058µs" I0213 18:06:03.566400 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="62.363µs" I0213 18:06:03.624446 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="75.935µs" I0213 18:06:03.635331 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="81.068µs" W0213 18:06:03.649450 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:06:03.650127 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:06:03.650781 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:06:03.650810 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:06:03.927392 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="69.574µs" I0213 18:06:03.934536 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="52.138µs" I0213 18:06:03.945874 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="7.712µs" I0213 18:06:04.289587 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="21.190984ms" I0213 18:06:04.297738 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="8.113143ms" I0213 18:06:04.297827 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="57.723µs" I0213 18:06:04.301595 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="60.499µs" W0213 18:06:05.424086 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:06:05.424724 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:06:05.425257 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:06:05.425283 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:06:05.638339 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="8.457087ms" I0213 18:06:05.638445 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="72.132µs" I0213 18:06:05.662912 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="17.298224ms" I0213 18:06:05.671715 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="8.75932ms" I0213 18:06:05.671794 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="52.151µs" I0213 18:06:05.813691 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="30.398105ms" I0213 18:06:05.822619 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="8.89278ms" I0213 18:06:05.822723 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="46.048µs" I0213 
18:06:05.829607 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="59.044µs" I0213 18:06:05.848392 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="64.642µs" I0213 18:06:05.986787 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="22.94043ms" I0213 18:06:05.993710 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="6.890633ms" I0213 18:06:05.993788 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="46.944µs" I0213 18:06:05.996828 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="57.939µs" I0213 18:06:06.161731 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="16.895602ms" I0213 18:06:06.170861 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="9.093278ms" I0213 18:06:06.170937 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="48.373µs" I0213 18:06:06.182321 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="58.73µs" I0213 18:06:06.636664 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="94.96µs" I0213 18:06:06.644904 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="59.795µs" I0213 18:06:08.696153 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="0s" I0213 18:06:08.700891 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:08.706466 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:08.706688 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:08.716073 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:10.241584 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-74c4c9744c" duration="24.887881ms" I0213 18:06:10.250117 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-74c4c9744c" duration="8.502744ms" I0213 18:06:10.250181 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-74c4c9744c" duration="36.357µs" I0213 
18:06:10.256518 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-74c4c9744c" duration="42.739µs" W0213 18:06:10.440322 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:06:10.441174 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:06:10.441799 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:06:10.441824 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:06:10.648285 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="57.095µs" I0213 18:06:10.662016 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="5.76197ms" I0213 18:06:10.662096 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="46.912µs" I0213 18:06:10.849256 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:06:12.244248 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="22.894746ms" I0213 18:06:12.250937 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="6.651348ms" I0213 18:06:12.250994 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="27.931µs" I0213 18:06:12.253940 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="33.597µs" I0213 18:06:12.657889 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="68.772µs" I0213 18:06:14.676416 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="8.949302ms" I0213 18:06:14.676492 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="42.381µs" W0213 18:06:15.928593 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:15.929264 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 
1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:16.222959 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:16.223701 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:21.230094 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:06:22.792311 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:06:22.792958 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:06:22.793580 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:06:22.793606 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:06:23.674374 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="8.868116ms" I0213 18:06:23.674448 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="45.777µs" I0213 18:06:24.858347 1 shared_informer.go:313] Waiting for caches to sync for garbage collector W0213 18:06:24.933429 1 shared_informer.go:597] resyncPeriod 21h47m59.164019087s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. 
Changing it to 23h6m13.908067421s I0213 18:06:24.933469 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="repositories.pipelinesascode.tekton.dev" I0213 18:06:24.933516 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:06:25.659152 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:06:25.733835 1 shared_informer.go:320] Caches are synced for resource quota W0213 18:06:30.934132 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:30.934854 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:31.228618 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:31.229281 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:36.729900 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:37.738035 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:40.745681 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-74c4c9744c" duration="65.869µs" I0213 18:06:41.487207 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:06:42.753869 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:42.775819 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="8.492967ms" I0213 18:06:42.775876 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="30.393µs" I0213 18:06:43.453260 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="19.289615ms" I0213 18:06:43.461164 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="7.857983ms" I0213 18:06:43.461230 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" 
duration="36.085µs" I0213 18:06:43.470870 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="35.984µs" I0213 18:06:43.758444 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:43.761293 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:44.766175 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0213 18:06:45.941793 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:45.942482 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:46.233959 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:46.234536 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:47.028318 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:06:47.029079 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:06:47.029803 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:06:47.029831 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:06:50.792916 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="6.786513ms" I0213 18:06:50.792996 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="44.751µs" I0213 18:06:51.757621 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="dex/dex-74c4c9744c" duration="8.414675ms" I0213 18:06:51.757696 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-74c4c9744c" duration="31.584µs" I0213 18:06:51.835077 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:06:51.891743 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="22.310457ms" I0213 18:06:51.921685 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="29.90405ms" I0213 18:06:51.921784 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="67.743µs" I0213 18:06:51.951915 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="16.639178ms" I0213 18:06:51.960414 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="8.469972ms" I0213 18:06:51.960502 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="63.48µs" I0213 18:06:51.970910 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="46.527µs" I0213 18:06:52.013276 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="9.49332ms" I0213 18:06:52.013350 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="43.206µs" I0213 18:06:52.078950 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="9.220902ms" I0213 18:06:52.079025 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="45.022µs" I0213 18:06:55.668812 1 shared_informer.go:313] Waiting for caches to sync for garbage collector W0213 18:06:55.737928 1 shared_informer.go:597] resyncPeriod 14h15m3.438850186s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.737971 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="passwords.dex.coreos.com" W0213 18:06:55.737988 1 shared_informer.go:597] resyncPeriod 19h50m58.480928563s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738002 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedgeneratingpolicies.policies.kyverno.io" W0213 18:06:55.738018 1 shared_informer.go:597] resyncPeriod 13h38m6.545261491s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. 
Changing it to 23h6m13.908067421s I0213 18:06:55.738034 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="updaterequests.kyverno.io" W0213 18:06:55.738052 1 shared_informer.go:597] resyncPeriod 12h10m25.969989974s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738069 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ephemeralreports.reports.kyverno.io" W0213 18:06:55.738081 1 shared_informer.go:597] resyncPeriod 13h22m27.670211043s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738100 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicerequests.dex.coreos.com" W0213 18:06:55.738115 1 shared_informer.go:597] resyncPeriod 21h18m13.61766178s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738136 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="offlinesessionses.dex.coreos.com" W0213 18:06:55.738151 1 shared_informer.go:597] resyncPeriod 18h6m48.224446395s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738199 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authcodes.dex.coreos.com" W0213 18:06:55.738213 1 shared_informer.go:597] resyncPeriod 19h44m16.137057059s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738228 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.policies.kyverno.io" W0213 18:06:55.738244 1 shared_informer.go:597] resyncPeriod 12h35m55.808180503s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738258 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedvalidatingpolicies.policies.kyverno.io" W0213 18:06:55.738268 1 shared_informer.go:597] resyncPeriod 12h14m24.120857717s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738282 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="signingkeies.dex.coreos.com" W0213 18:06:55.738293 1 shared_informer.go:597] resyncPeriod 21h46m58.781895161s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738305 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedimagevalidatingpolicies.policies.kyverno.io" W0213 18:06:55.738325 1 shared_informer.go:597] resyncPeriod 19h11m39.586484434s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. 
Changing it to 23h6m13.908067421s I0213 18:06:55.738344 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cleanuppolicies.kyverno.io" W0213 18:06:55.738364 1 shared_informer.go:597] resyncPeriod 17h23m21.631768867s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738379 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policies.kyverno.io" W0213 18:06:55.738390 1 shared_informer.go:597] resyncPeriod 13h41m30.473970401s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738401 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespaceddeletingpolicies.policies.kyverno.io" W0213 18:06:55.738425 1 shared_informer.go:597] resyncPeriod 15h26m12.745920257s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738445 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="refreshtokens.dex.coreos.com" W0213 18:06:55.738464 1 shared_informer.go:597] resyncPeriod 16h36m54.389519342s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738480 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="oauth2clients.dex.coreos.com" W0213 18:06:55.738497 1 shared_informer.go:597] resyncPeriod 22h41m45.438041613s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738511 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedmutatingpolicies.policies.kyverno.io" W0213 18:06:55.738528 1 shared_informer.go:597] resyncPeriod 12h27m47.03865813s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738543 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="connectors.dex.coreos.com" W0213 18:06:55.738557 1 shared_informer.go:597] resyncPeriod 19h13m4.522600475s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738574 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authrequests.dex.coreos.com" I0213 18:06:55.738592 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicetokens.dex.coreos.com" W0213 18:06:55.738601 1 shared_informer.go:597] resyncPeriod 21h15m2.66787895s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. 
Changing it to 23h6m13.908067421s I0213 18:06:55.738613 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.kyverno.io" W0213 18:06:55.738622 1 shared_informer.go:597] resyncPeriod 20h27m56.471485828s is smaller than resyncCheckPeriod 23h6m13.908067421s and the informer has already started. Changing it to 23h6m13.908067421s I0213 18:06:55.738657 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyreports.wgpolicyk8s.io" I0213 18:06:55.738828 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:06:55.804844 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="83.524µs" I0213 18:06:57.139953 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:06:57.169394 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:06:57.810718 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="78.888µs" I0213 18:06:58.827790 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="13.802683ms" I0213 18:06:58.827864 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="48.164µs" I0213 18:07:00.823555 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="87.484µs" W0213 18:07:00.947816 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:00.948449 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:01.239244 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:01.239869 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:03.831129 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:07:04.835279 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 
18:07:04.838976 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:07:04.928610 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:07:05.841696 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:07:05.848461 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:07:10.226296 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="66.106µs" I0213 18:07:10.247588 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="9.309338ms" I0213 18:07:10.247703 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="83.838µs" W0213 18:07:15.953091 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:15.953782 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:16.244065 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:16.244696 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:22.685288 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:07:24.938270 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:07:24.938962 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:07:24.939537 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:07:24.939562 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed 
to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:07:27.145116 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="components.appstudio.redhat.com" I0213 18:07:27.145160 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargets.appstudio.redhat.com" I0213 18:07:27.145190 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargetclaims.appstudio.redhat.com" I0213 18:07:27.145214 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="promotionruns.appstudio.redhat.com" I0213 18:07:27.145240 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="enterprisecontractpolicies.appstudio.redhat.com" I0213 18:07:27.145262 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="environments.appstudio.redhat.com" I0213 18:07:27.145290 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshotenvironmentbindings.appstudio.redhat.com" I0213 18:07:27.145332 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="applications.appstudio.redhat.com" I0213 18:07:27.145365 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentdetectionqueries.appstudio.redhat.com" I0213 18:07:27.145387 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshots.appstudio.redhat.com" I0213 18:07:27.145513 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:07:27.183432 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:07:28.246177 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:07:28.284370 1 shared_informer.go:320] Caches are synced for garbage collector W0213 18:07:30.958866 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:30.959536 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:31.248635 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:31.249277 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first 
error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:38.189106 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="23.957333ms" I0213 18:07:38.195825 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="6.674815ms" I0213 18:07:38.195892 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="35.52µs" I0213 18:07:38.201720 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="30.729µs" I0213 18:07:42.941511 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="46.265µs" W0213 18:07:45.964776 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:45.965465 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:46.253560 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:46.254184 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:49.736235 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="27.983129ms" I0213 18:07:49.744498 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="8.222259ms" I0213 18:07:49.744579 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="45.822µs" I0213 18:07:49.751160 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="42.941µs" I0213 18:07:53.447395 1 
range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:07:53.959220 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="14.167203ms" I0213 18:07:53.959300 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="45.316µs" I0213 18:07:53.973554 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="42.009µs" I0213 18:07:58.252385 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="integrationtestscenarios.appstudio.redhat.com" I0213 18:07:58.252421 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseserviceconfigs.appstudio.redhat.com" I0213 18:07:58.252442 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplans.appstudio.redhat.com" I0213 18:07:58.252467 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplanadmissions.appstudio.redhat.com" I0213 18:07:58.252491 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentgroups.appstudio.redhat.com" I0213 18:07:58.252514 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalservicesconfigs.appstudio.redhat.com" I0213 18:07:58.252541 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalrequests.appstudio.redhat.com" I0213 18:07:58.252568 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releases.appstudio.redhat.com" I0213 18:07:58.252703 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:07:58.294635 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:07:59.353699 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:07:59.395489 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:07:59.705902 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="19.887199ms" I0213 18:07:59.714242 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="8.287303ms" I0213 18:07:59.714311 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="37.473µs" I0213 18:07:59.714339 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="16.48µs" I0213 18:07:59.727018 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="integration-service/integration-service-controller-manager-f5668ffc8" duration="40.917µs" W0213 18:08:00.969896 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:00.970573 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:01.258064 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:01.258692 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:02.362788 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="21.146489ms" I0213 18:08:02.374874 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="12.04543ms" I0213 18:08:02.374945 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="37.582µs" I0213 18:08:02.393665 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="88.149µs" I0213 18:08:03.514145 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:08:04.988236 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="8.227389ms" I0213 18:08:04.988323 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-7c6dbc6cd8" duration="49.771µs" I0213 18:08:05.746316 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="77.002903ms" I0213 18:08:05.757527 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="10.936222ms" I0213 18:08:05.757620 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="60.413µs" I0213 18:08:05.757683 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="48.809µs" I0213 18:08:05.766715 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="104.394µs" I0213 18:08:06.012397 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="48.029µs" I0213 18:08:09.025154 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="44.855µs" I0213 18:08:10.039751 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="9.503654ms" I0213 18:08:10.039824 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="41.738µs" I0213 18:08:13.822951 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:08:15.974914 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:15.975605 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:16.263036 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:16.263677 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:17.027786 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="9.177211ms" I0213 18:08:17.027855 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="37.976µs" I0213 18:08:18.113068 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="51.615µs" I0213 18:08:19.056584 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="82.173µs" I0213 18:08:24.060915 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:08:24.395939 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested 
resource E0213 18:08:24.396619 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:08:24.397244 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:08:24.397265 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:08:28.088179 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="75.329µs" I0213 18:08:29.091944 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="61.037µs" W0213 18:08:30.985131 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:30.985722 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:31.267715 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:31.268373 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:36.037421 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" I0213 18:08:37.122158 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="53.676µs" I0213 18:08:44.618477 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:08:45.990039 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:45.990705 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch 
metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:46.272553 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:46.273209 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:54.656396 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:09:01.000792 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:01.001423 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:01.277938 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:01.278597 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:09:08.136943 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="8.935258ms" I0213 18:09:08.137033 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="62.7µs" W0213 18:09:13.856814 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:09:13.857488 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:09:13.858129 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:09:13.858151 1 reflector.go:166] "Unhandled Error" 
err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:09:16.006788 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:16.007430 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:16.284635 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:16.285241 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:31.016427 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:31.017048 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:31.289510 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:31.290125 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:09:38.146481 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="8.62383ms" I0213 18:09:38.146558 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-566cb55db" duration="52.351µs" W0213 18:09:46.023026 1 type.go:183] The watchlist request for pods ended with an error, 
falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:46.023702 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:46.294048 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:46.294731 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:09:46.598625 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-54d5c65ff5" duration="20.645321ms" I0213 18:09:46.613322 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-54d5c65ff5" duration="14.639837ms" I0213 18:09:46.613390 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-54d5c65ff5" duration="38.647µs" I0213 18:09:56.338264 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-54d5c65ff5" duration="46.413µs" W0213 18:09:58.868062 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:09:58.868748 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:09:58.869314 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:09:58.869335 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:09:59.374481 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="imagerepositories.appstudio.redhat.com" I0213 18:09:59.374599 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:09:59.420559 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:09:59.420633 1 
shared_informer.go:320] Caches are synced for garbage collector I0213 18:09:59.475727 1 shared_informer.go:320] Caches are synced for resource quota W0213 18:10:01.028857 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:01.029517 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:01.299154 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:01.299735 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:10:07.351186 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-54d5c65ff5" duration="8.753488ms" I0213 18:10:07.351253 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-54d5c65ff5" duration="36.884µs" I0213 18:10:08.681473 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-55b57c8d8b" duration="19.900762ms" I0213 18:10:08.689453 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-55b57c8d8b" duration="7.947822ms" I0213 18:10:08.689511 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-55b57c8d8b" duration="36.131µs" I0213 18:10:08.696483 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-55b57c8d8b" duration="45.316µs" I0213 18:10:11.391468 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-55b57c8d8b" duration="6.541431ms" I0213 18:10:11.391545 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-55b57c8d8b" duration="46.618µs" I0213 18:10:11.422556 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="24.163643ms" I0213 18:10:11.430608 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="8.014211ms" I0213 18:10:11.430703 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="smee-client/gosmee-client-6b65b54d4d" duration="65.176µs" I0213 18:10:11.941558 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="53.664µs" I0213 18:10:12.393054 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="46.354µs" I0213 18:10:12.400725 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b65b54d4d" duration="42.565µs" W0213 18:10:16.034673 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:16.035361 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:16.305112 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:16.305710 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:10:16.532121 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:10:26.592558 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:10:31.044086 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:31.044686 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:31.310196 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:31.310886 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu 
utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:38.932115 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:10:38.932821 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:10:38.933414 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:10:38.933435 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:10:46.049136 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:46.049810 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:46.315290 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:46.315883 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:01.058598 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:01.059317 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:01.320841 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get 
pods.metrics.k8s.io) E0213 18:11:01.321485 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:16.063710 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:16.064374 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:16.326823 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:16.327452 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:31.074022 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:31.074582 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:31.331351 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:31.332012 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:34.311286 1 reflector.go:362] The watchlist request ended with an error, falling back to the 
standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:11:34.311989 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:11:34.312599 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:11:34.312621 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:11:37.643259 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:11:46.079449 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:46.080185 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:46.336809 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:46.337439 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:11:47.656086 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:11:57.678266 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:01.088861 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:01.089468 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:01.342025 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST 
semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:01.342873 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:12:07.906880 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:16.094031 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:16.094777 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:16.347326 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:16.347997 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:12:18.198288 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:18.696392 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:12:18.697093 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:12:18.697685 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:12:18.697705 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:12:31.103499 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:31.104195 1 horizontal.go:275] 
"Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:31.351970 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:31.352611 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:46.109236 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:46.109893 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:46.358267 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:46.358946 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:12:48.677500 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:12:59.063527 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:13:01.119096 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:01.119786 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could 
not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:01.364589 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:01.365195 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:16.072420 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:13:16.073097 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:13:16.073702 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:13:16.073729 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:13:16.124609 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:16.125278 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:16.369297 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:16.369926 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:13:19.149536 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:13:29.248410 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:13:31.139766 1 type.go:183] The watchlist request 
for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:31.140423 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:31.373854 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:31.374501 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:46.146468 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:46.147168 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:46.378261 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:46.378935 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:01.156063 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:01.156690 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" 
logger="UnhandledError" W0213 18:14:01.383364 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:01.383996 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:05.092585 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:14:05.093260 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:14:05.093815 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:14:05.093837 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:14:16.161308 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:16.161982 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:16.387926 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:16.388576 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:14:30.744322 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:31.171728 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:31.172354 1 
horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:31.393352 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:31.394040 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:14:40.909939 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:46.176977 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:46.177662 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:46.398341 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:46.398926 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:53.958867 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:14:53.959514 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:14:53.960186 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:14:53.960213 1 reflector.go:166] "Unhandled Error" 
err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:15:01.186790 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:01.187368 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:01.405757 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:01.406411 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:16.191766 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:16.192421 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:16.410836 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:16.411490 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:31.202897 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:31.203571 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid 
metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:31.415183 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:31.415790 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:45.036768 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:15:45.037476 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:15:45.038146 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:15:45.038172 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:15:46.207943 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:46.208664 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:46.426201 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:46.426851 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:01.217987 1 type.go:183] The watchlist request for pods ended with an error, 
falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:01.218702 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:01.434227 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:01.434870 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:16:01.787396 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:16:11.925308 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:16:16.223493 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:16.224235 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:16.439233 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:16.439857 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:16:22.313312 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:16:30.076170 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:16:30.076862 1 metadata.go:231] "The watchlist request ended with an error, falling back to 
the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:16:30.077454 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:16:30.077477 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:16:31.233713 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:31.234368 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:31.445131 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:31.445770 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:46.239151 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:46.239863 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:46.449395 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:46.450047 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:01.249443 
1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:01.250125 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:01.354054 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:17:01.354773 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:17:01.355392 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:17:01.355413 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:17:01.453977 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:01.454587 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:17:13.345978 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:17:16.254924 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:16.255563 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:16.458772 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:16.459403 1 horizontal.go:275] "Unhandled Error" err="failed to compute 
desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:31.264662 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:31.265318 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:31.464093 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:31.464697 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:17:33.384294 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:17:44.801227 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:17:44.801949 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:17:44.802541 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:17:44.802568 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:17:46.269997 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:46.270667 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource 
cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:46.468595 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:46.469242 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:01.280134 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:01.280828 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:01.473710 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:01.474354 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:16.286125 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:16.286804 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:16.478950 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:16.479572 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu 
resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
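The recurring HPA errors above all reduce to the same cause: nothing in this kind cluster serves the Resource Metrics API (metrics.k8s.io), so every `get pods.metrics.k8s.io` issued by the horizontal-pod-autoscaler controller returns NotFound and the Tekton webhook HPAs can never compute CPU utilization. The following is a minimal Go sketch (not taken from, or part of, the cluster above) that issues the same request the controller makes; the kubeconfig path and the `tekton-pipelines` namespace are assumptions for illustration, and the program only confirms whether a metrics.k8s.io provider such as metrics-server is installed.

```go
// metricscheck.go: hedged diagnostic sketch, not the controller's own code.
// It lists pod metrics through the aggregated metrics.k8s.io API, the same
// call path that fails in the log with "get pods.metrics.k8s.io".
package main

import (
	"context"
	"fmt"
	"log"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
	// Load credentials from the default kubeconfig (~/.kube/config); this path
	// is an assumption and may differ for a kind cluster.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatalf("load kubeconfig: %v", err)
	}

	mc, err := metricsclient.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("build metrics client: %v", err)
	}

	// Roughly: GET /apis/metrics.k8s.io/v1beta1/namespaces/tekton-pipelines/pods
	pods, err := mc.MetricsV1beta1().PodMetricses("tekton-pipelines").
		List(context.Background(), metav1.ListOptions{})
	if apierrors.IsNotFound(err) {
		// Corresponds to the log above: no APIService is serving metrics.k8s.io,
		// so the HPA controller cannot fetch CPU usage for its targets.
		log.Fatalf("metrics.k8s.io is not served (is metrics-server installed?): %v", err)
	}
	if err != nil {
		log.Fatalf("list pod metrics: %v", err)
	}

	for _, p := range pods.Items {
		for _, c := range p.Containers {
			fmt.Printf("%s/%s cpu=%s\n", p.Name, c.Name, c.Usage.Cpu().String())
		}
	}
}
```

If the call returns NotFound, installing a metrics.k8s.io provider (commonly metrics-server, which on kind usually needs --kubelet-insecure-tls because kubelets use self-signed certificates) is the usual fix. The separate reflector errors for dashboard.tekton.dev/v1alpha1 extensions appear unrelated: they come from the metadata informers tracking a CRD group that is not currently served, and are expected to stop once that API (or its leftover tracked resources) is removed.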