I0204 17:43:38.556019 1 serving.go:386] Generated self-signed cert in-memory I0204 17:43:39.054487 1 controllermanager.go:185] "Starting" version="v1.32.5" I0204 17:43:39.054504 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" I0204 17:43:39.055619 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/pki/front-proxy-ca.crt" I0204 17:43:39.055623 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/pki/ca.crt" I0204 17:43:39.055771 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257 I0204 17:43:39.055849 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" I0204 17:43:39.055944 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager... E0204 17:43:39.945606 1 leaderelection.go:436] error retrieving resource lock kube-system/kube-controller-manager: leases.coordination.k8s.io "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-system" I0204 17:43:42.293591 1 leaderelection.go:271] successfully acquired lease kube-system/kube-controller-manager I0204 17:43:42.293623 1 event.go:389] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="kind-mapt-control-plane_ce44a1fd-818a-479b-92f1-33baec349474 became leader" I0204 17:43:42.295639 1 controllermanager.go:765] "Started controller" controller="serviceaccount-token-controller" I0204 17:43:42.295695 1 shared_informer.go:313] Waiting for caches to sync for tokens I0204 17:43:42.306994 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-cleaner-controller" I0204 17:43:42.307067 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller" I0204 17:43:42.313776 1 controllermanager.go:765] "Started controller" controller="bootstrap-signer-controller" I0204 17:43:42.313874 1 shared_informer.go:313] Waiting for caches to sync for bootstrap_signer I0204 17:43:42.327180 1 range_allocator.go:112] "No Secondary Service CIDR provided. 
Skipping filtering out secondary service addresses" logger="node-ipam-controller" I0204 17:43:42.327207 1 controllermanager.go:765] "Started controller" controller="node-ipam-controller" I0204 17:43:42.327213 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="volumeattributesclass-protection-controller" requiredFeatureGates=["VolumeAttributesClass"] I0204 17:43:42.327564 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller" I0204 17:43:42.327579 1 shared_informer.go:313] Waiting for caches to sync for node I0204 17:43:42.341091 1 controllermanager.go:765] "Started controller" controller="root-ca-certificate-publisher-controller" I0204 17:43:42.341157 1 publisher.go:107] "Starting root CA cert publisher controller" logger="root-ca-certificate-publisher-controller" I0204 17:43:42.341168 1 shared_informer.go:313] Waiting for caches to sync for crt configmap I0204 17:43:42.347594 1 controllermanager.go:765] "Started controller" controller="ephemeral-volume-controller" I0204 17:43:42.347673 1 controller.go:173] "Starting ephemeral volume controller" logger="ephemeral-volume-controller" I0204 17:43:42.347686 1 shared_informer.go:313] Waiting for caches to sync for ephemeral I0204 17:43:42.361437 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling" I0204 17:43:42.361469 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="statefulsets.apps" I0204 17:43:42.361481 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps" I0204 17:43:42.361504 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io" I0204 17:43:42.361531 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io" I0204 17:43:42.361561 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io" W0204 17:43:42.361573 1 shared_informer.go:597] resyncPeriod 16h18m13.367816881s is smaller than resyncCheckPeriod 23h11m7.858998347s and the informer has already started. 
Changing it to 23h11m7.858998347s I0204 17:43:42.361600 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts" I0204 17:43:42.361618 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch" I0204 17:43:42.361643 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch" I0204 17:43:42.361655 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io" I0204 17:43:42.361667 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io" I0204 17:43:42.361690 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps" I0204 17:43:42.361703 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps" I0204 17:43:42.361715 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io" W0204 17:43:42.361724 1 shared_informer.go:597] resyncPeriod 15h51m59.779052883s is smaller than resyncCheckPeriod 23h11m7.858998347s and the informer has already started. Changing it to 23h11m7.858998347s I0204 17:43:42.361749 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints" I0204 17:43:42.361770 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="daemonsets.apps" I0204 17:43:42.361787 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy" I0204 17:43:42.361800 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io" I0204 17:43:42.361843 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges" I0204 17:43:42.361862 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates" I0204 17:43:42.361878 1 controllermanager.go:765] "Started controller" controller="resourcequota-controller" I0204 17:43:42.361904 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller" I0204 17:43:42.361921 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:43:42.361946 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller" I0204 17:43:42.362998 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-approving-controller" I0204 17:43:42.363011 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"] I0204 17:43:42.363098 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-approving-controller" name="csrapproving" I0204 17:43:42.363117 1 
shared_informer.go:313] Waiting for caches to sync for certificate-csrapproving I0204 17:43:42.369220 1 controllermanager.go:765] "Started controller" controller="deployment-controller" I0204 17:43:42.369307 1 deployment_controller.go:173] "Starting controller" logger="deployment-controller" controller="deployment" I0204 17:43:42.369320 1 shared_informer.go:313] Waiting for caches to sync for deployment I0204 17:43:42.375360 1 controllermanager.go:765] "Started controller" controller="token-cleaner-controller" I0204 17:43:42.375374 1 controllermanager.go:743] "Warning: skipping controller" controller="storage-version-migrator-controller" I0204 17:43:42.375380 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="selinux-warning-controller" requiredFeatureGates=["SELinuxChangePolicy"] I0204 17:43:42.375434 1 tokencleaner.go:117] "Starting token cleaner controller" logger="token-cleaner-controller" I0204 17:43:42.375445 1 shared_informer.go:313] Waiting for caches to sync for token_cleaner I0204 17:43:42.375452 1 shared_informer.go:320] Caches are synced for token_cleaner I0204 17:43:42.395789 1 shared_informer.go:320] Caches are synced for tokens I0204 17:43:42.501986 1 controllermanager.go:765] "Started controller" controller="endpoints-controller" I0204 17:43:42.502063 1 endpoints_controller.go:182] "Starting endpoint controller" logger="endpoints-controller" I0204 17:43:42.502074 1 shared_informer.go:313] Waiting for caches to sync for endpoint I0204 17:43:42.696122 1 controllermanager.go:765] "Started controller" controller="disruption-controller" I0204 17:43:42.696136 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="service-lb-controller" I0204 17:43:42.696140 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="cloud-node-lifecycle-controller" I0204 17:43:42.696238 1 disruption.go:452] "Sending events to api server." 
logger="disruption-controller" I0204 17:43:42.696286 1 disruption.go:463] "Starting disruption controller" logger="disruption-controller" I0204 17:43:42.696297 1 shared_informer.go:313] Waiting for caches to sync for disruption I0204 17:43:42.896402 1 controllermanager.go:765] "Started controller" controller="validatingadmissionpolicy-status-controller" I0204 17:43:42.896467 1 shared_informer.go:313] Waiting for caches to sync for validatingadmissionpolicy-status I0204 17:43:42.946155 1 controllermanager.go:765] "Started controller" controller="taint-eviction-controller" I0204 17:43:42.946217 1 taint_eviction.go:281] "Starting" logger="taint-eviction-controller" controller="taint-eviction-controller" I0204 17:43:42.946233 1 taint_eviction.go:287] "Sending events to api server" logger="taint-eviction-controller" I0204 17:43:42.946249 1 shared_informer.go:313] Waiting for caches to sync for taint-eviction-controller I0204 17:43:43.099573 1 controllermanager.go:765] "Started controller" controller="endpointslice-controller" I0204 17:43:43.099677 1 endpointslice_controller.go:281] "Starting endpoint slice controller" logger="endpointslice-controller" I0204 17:43:43.099692 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice I0204 17:43:43.249702 1 controllermanager.go:765] "Started controller" controller="daemonset-controller" I0204 17:43:43.249773 1 daemon_controller.go:294] "Starting daemon sets controller" logger="daemonset-controller" I0204 17:43:43.249783 1 shared_informer.go:313] Waiting for caches to sync for daemon sets I0204 17:43:43.403244 1 controllermanager.go:765] "Started controller" controller="clusterrole-aggregation-controller" I0204 17:43:43.403291 1 clusterroleaggregation_controller.go:194] "Starting ClusterRoleAggregator controller" logger="clusterrole-aggregation-controller" I0204 17:43:43.403300 1 shared_informer.go:313] Waiting for caches to sync for ClusterRoleAggregator I0204 17:43:43.645785 1 controllermanager.go:765] "Started controller" controller="garbage-collector-controller" I0204 17:43:43.645785 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector" I0204 17:43:43.645830 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:43:43.645847 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder" I0204 17:43:43.696145 1 node_lifecycle_controller.go:432] "Controller will reconcile labels" logger="node-lifecycle-controller" I0204 17:43:43.696176 1 controllermanager.go:765] "Started controller" controller="node-lifecycle-controller" I0204 17:43:43.696225 1 node_lifecycle_controller.go:466] "Sending events to api server" logger="node-lifecycle-controller" I0204 17:43:43.696262 1 node_lifecycle_controller.go:477] "Starting node controller" logger="node-lifecycle-controller" I0204 17:43:43.696274 1 shared_informer.go:313] Waiting for caches to sync for taint I0204 17:43:43.949532 1 controllermanager.go:765] "Started controller" controller="persistentvolumeclaim-protection-controller" I0204 17:43:43.949549 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"] I0204 17:43:43.949594 1 pvc_protection_controller.go:168] "Starting PVC protection controller" logger="persistentvolumeclaim-protection-controller" I0204 17:43:43.949605 1 shared_informer.go:313] Waiting for caches to sync for PVC 
protection I0204 17:43:44.099160 1 controllermanager.go:765] "Started controller" controller="statefulset-controller" I0204 17:43:44.099246 1 stateful_set.go:166] "Starting stateful set controller" logger="statefulset-controller" I0204 17:43:44.099261 1 shared_informer.go:313] Waiting for caches to sync for stateful set I0204 17:43:44.249774 1 controllermanager.go:765] "Started controller" controller="persistentvolume-expander-controller" I0204 17:43:44.249817 1 expand_controller.go:329] "Starting expand controller" logger="persistentvolume-expander-controller" I0204 17:43:44.249826 1 shared_informer.go:313] Waiting for caches to sync for expand I0204 17:43:44.401064 1 controllermanager.go:765] "Started controller" controller="replicaset-controller" I0204 17:43:44.401143 1 replica_set.go:217] "Starting controller" logger="replicaset-controller" name="replicaset" I0204 17:43:44.401157 1 shared_informer.go:313] Waiting for caches to sync for ReplicaSet I0204 17:43:44.549451 1 controllermanager.go:765] "Started controller" controller="ttl-after-finished-controller" I0204 17:43:44.549468 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="resourceclaim-controller" requiredFeatureGates=["DynamicResourceAllocation"] I0204 17:43:44.549521 1 ttlafterfinished_controller.go:112] "Starting TTL after finished controller" logger="ttl-after-finished-controller" I0204 17:43:44.549533 1 shared_informer.go:313] Waiting for caches to sync for TTL after finished I0204 17:43:44.699171 1 controllermanager.go:765] "Started controller" controller="endpointslice-mirroring-controller" I0204 17:43:44.699268 1 endpointslicemirroring_controller.go:227] "Starting EndpointSliceMirroring controller" logger="endpointslice-mirroring-controller" I0204 17:43:44.699280 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice_mirroring I0204 17:43:44.849622 1 controllermanager.go:765] "Started controller" controller="replicationcontroller-controller" I0204 17:43:44.849686 1 replica_set.go:217] "Starting controller" logger="replicationcontroller-controller" name="replicationcontroller" I0204 17:43:44.849697 1 shared_informer.go:313] Waiting for caches to sync for ReplicationController I0204 17:43:44.999690 1 controllermanager.go:765] "Started controller" controller="cronjob-controller" I0204 17:43:44.999763 1 cronjob_controllerv2.go:145] "Starting cronjob controller v2" logger="cronjob-controller" I0204 17:43:44.999773 1 shared_informer.go:313] Waiting for caches to sync for cronjob I0204 17:43:45.046320 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving" I0204 17:43:45.046340 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-serving I0204 17:43:45.046358 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0204 17:43:45.046446 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client" I0204 17:43:45.046463 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-client I0204 17:43:45.046480 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0204 17:43:45.046606 1 certificate_controller.go:120] "Starting certificate controller" 
logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client" I0204 17:43:45.046617 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kube-apiserver-client I0204 17:43:45.046629 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0204 17:43:45.046744 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-signing-controller" I0204 17:43:45.046782 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown" I0204 17:43:45.046791 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-legacy-unknown I0204 17:43:45.046802 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0204 17:43:45.199312 1 controllermanager.go:765] "Started controller" controller="ttl-controller" I0204 17:43:45.199393 1 ttl_controller.go:127] "Starting TTL controller" logger="ttl-controller" I0204 17:43:45.199404 1 shared_informer.go:313] Waiting for caches to sync for TTL I0204 17:43:45.349242 1 controllermanager.go:765] "Started controller" controller="persistentvolume-binder-controller" I0204 17:43:45.349321 1 pv_controller_base.go:308] "Starting persistent volume controller" logger="persistentvolume-binder-controller" I0204 17:43:45.349341 1 shared_informer.go:313] Waiting for caches to sync for persistent volume I0204 17:43:45.499674 1 controllermanager.go:765] "Started controller" controller="persistentvolume-attach-detach-controller" I0204 17:43:45.499759 1 attach_detach_controller.go:338] "Starting attach detach controller" logger="persistentvolume-attach-detach-controller" I0204 17:43:45.499774 1 shared_informer.go:313] Waiting for caches to sync for attach detach I0204 17:43:45.649567 1 controllermanager.go:765] "Started controller" controller="persistentvolume-protection-controller" I0204 17:43:45.649614 1 pv_protection_controller.go:81] "Starting PV protection controller" logger="persistentvolume-protection-controller" I0204 17:43:45.649621 1 shared_informer.go:313] Waiting for caches to sync for PV protection I0204 17:43:45.899752 1 controllermanager.go:765] "Started controller" controller="namespace-controller" I0204 17:43:45.899789 1 namespace_controller.go:202] "Starting namespace controller" logger="namespace-controller" I0204 17:43:45.899796 1 shared_informer.go:313] Waiting for caches to sync for namespace I0204 17:43:46.048639 1 controllermanager.go:765] "Started controller" controller="serviceaccount-controller" I0204 17:43:46.048657 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="service-cidr-controller" requiredFeatureGates=["MultiCIDRServiceAllocator"] I0204 17:43:46.048709 1 serviceaccounts_controller.go:114] "Starting service account controller" logger="serviceaccount-controller" I0204 17:43:46.048722 1 shared_informer.go:313] Waiting for caches to sync for service account I0204 17:43:46.345010 1 controllermanager.go:765] "Started controller" controller="horizontal-pod-autoscaler-controller" I0204 17:43:46.345026 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="node-route-controller" I0204 17:43:46.345049 1 horizontal.go:201] "Starting HPA controller" logger="horizontal-pod-autoscaler-controller" I0204 17:43:46.345057 1 shared_informer.go:313] Waiting for caches 
to sync for HPA I0204 17:43:46.498342 1 controllermanager.go:765] "Started controller" controller="legacy-serviceaccount-token-cleaner-controller" I0204 17:43:46.498369 1 legacy_serviceaccount_token_cleaner.go:103] "Starting legacy service account token cleaner controller" logger="legacy-serviceaccount-token-cleaner-controller" I0204 17:43:46.498375 1 shared_informer.go:313] Waiting for caches to sync for legacy-service-account-token-cleaner I0204 17:43:46.648847 1 controllermanager.go:765] "Started controller" controller="pod-garbage-collector-controller" I0204 17:43:46.648883 1 gc_controller.go:99] "Starting GC controller" logger="pod-garbage-collector-controller" I0204 17:43:46.648897 1 shared_informer.go:313] Waiting for caches to sync for GC I0204 17:43:46.799946 1 controllermanager.go:765] "Started controller" controller="job-controller" I0204 17:43:46.800044 1 job_controller.go:243] "Starting job controller" logger="job-controller" I0204 17:43:46.800060 1 shared_informer.go:313] Waiting for caches to sync for job I0204 17:43:46.801175 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:43:46.807575 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"kind-mapt-control-plane\" does not exist" I0204 17:43:46.808357 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:43:46.814116 1 shared_informer.go:320] Caches are synced for bootstrap_signer I0204 17:43:46.828371 1 shared_informer.go:320] Caches are synced for node I0204 17:43:46.828399 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller" I0204 17:43:46.828433 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller" I0204 17:43:46.828441 1 shared_informer.go:313] Waiting for caches to sync for cidrallocator I0204 17:43:46.828446 1 shared_informer.go:320] Caches are synced for cidrallocator I0204 17:43:46.834274 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="kind-mapt-control-plane" podCIDRs=["10.244.0.0/24"] I0204 17:43:46.834292 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:43:46.834310 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:43:46.841381 1 shared_informer.go:320] Caches are synced for crt configmap I0204 17:43:46.845625 1 shared_informer.go:320] Caches are synced for HPA I0204 17:43:46.846738 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kube-apiserver-client I0204 17:43:46.846752 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-client I0204 17:43:46.846775 1 shared_informer.go:320] Caches are synced for taint-eviction-controller I0204 17:43:46.846791 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:43:46.846806 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller" I0204 17:43:46.846811 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller" I0204 17:43:46.846779 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-serving I0204 17:43:46.847016 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-legacy-unknown I0204 17:43:46.847916 
1 shared_informer.go:320] Caches are synced for ephemeral I0204 17:43:46.848851 1 shared_informer.go:320] Caches are synced for service account I0204 17:43:46.848969 1 shared_informer.go:320] Caches are synced for GC I0204 17:43:46.849920 1 shared_informer.go:320] Caches are synced for PV protection I0204 17:43:46.849934 1 shared_informer.go:320] Caches are synced for expand I0204 17:43:46.849945 1 shared_informer.go:320] Caches are synced for ReplicationController I0204 17:43:46.849953 1 shared_informer.go:320] Caches are synced for PVC protection I0204 17:43:46.849976 1 shared_informer.go:320] Caches are synced for daemon sets I0204 17:43:46.849982 1 shared_informer.go:320] Caches are synced for persistent volume I0204 17:43:46.850044 1 shared_informer.go:320] Caches are synced for TTL after finished I0204 17:43:46.862439 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:43:46.863500 1 shared_informer.go:320] Caches are synced for certificate-csrapproving I0204 17:43:46.869678 1 shared_informer.go:320] Caches are synced for deployment I0204 17:43:46.896646 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status I0204 17:43:46.896724 1 shared_informer.go:320] Caches are synced for taint I0204 17:43:46.896733 1 shared_informer.go:320] Caches are synced for disruption I0204 17:43:46.896770 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone="" I0204 17:43:46.896851 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="kind-mapt-control-plane" I0204 17:43:46.896931 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller" I0204 17:43:46.899022 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner I0204 17:43:46.900170 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring I0204 17:43:46.900180 1 shared_informer.go:320] Caches are synced for stateful set I0204 17:43:46.900197 1 shared_informer.go:320] Caches are synced for namespace I0204 17:43:46.900208 1 shared_informer.go:320] Caches are synced for TTL I0204 17:43:46.900246 1 shared_informer.go:320] Caches are synced for attach detach I0204 17:43:46.900253 1 shared_informer.go:320] Caches are synced for cronjob I0204 17:43:46.900261 1 shared_informer.go:320] Caches are synced for job I0204 17:43:46.900249 1 shared_informer.go:320] Caches are synced for endpoint_slice I0204 17:43:46.901501 1 shared_informer.go:320] Caches are synced for ReplicaSet I0204 17:43:46.901526 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:43:46.902674 1 shared_informer.go:320] Caches are synced for endpoint I0204 17:43:46.903827 1 shared_informer.go:320] Caches are synced for ClusterRoleAggregator I0204 17:43:46.909090 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:43:47.703619 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:43:47.916349 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="359.458135ms" I0204 17:43:47.920578 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="361.646352ms" I0204 17:43:47.924687 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="8.300202ms" I0204 17:43:47.924760 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="43.376µs" I0204 17:43:47.933251 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="31.94µs" I0204 17:43:47.940880 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="20.271889ms" I0204 17:43:47.940955 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="47.713µs" I0204 17:43:52.745188 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:43:59.703064 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:43:59.710615 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:43:59.724402 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="38.384µs" I0204 17:43:59.724634 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="58.459µs" I0204 17:43:59.724677 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="19.417µs" I0204 17:43:59.737284 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="44.825µs" I0204 17:43:59.746376 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="37.198µs" I0204 17:43:59.753471 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="61.674µs" I0204 17:44:01.898333 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. 
Exiting master disruption mode" logger="node-lifecycle-controller" I0204 17:44:04.732716 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="62.658µs" I0204 17:44:04.755606 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="14.402267ms" I0204 17:44:04.755669 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="34.886µs" I0204 17:44:04.766615 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="11.30757ms" I0204 17:44:04.766713 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="40.722µs" I0204 17:44:13.750037 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="57.282µs" I0204 17:44:13.765723 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="7.810068ms" I0204 17:44:13.765778 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="30.603µs" I0204 17:44:33.929120 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:44:42.367201 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="test-pvc-ns" I0204 17:44:44.210058 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:44:46.908817 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="orders.acme.cert-manager.io" I0204 17:44:46.908861 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificaterequests.cert-manager.io" I0204 17:44:46.908898 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificates.cert-manager.io" I0204 17:44:46.908932 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="challenges.acme.cert-manager.io" I0204 17:44:46.908958 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="issuers.cert-manager.io" I0204 17:44:46.909018 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:44:46.920002 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:44:46.931925 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="24.371516ms" I0204 17:44:46.940499 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="8.540662ms" I0204 17:44:46.940560 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="35.841µs" I0204 17:44:46.943565 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="33.051µs" I0204 17:44:47.120249 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="15.92943ms" I0204 17:44:47.127971 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="7.682943ms" I0204 17:44:47.128028 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="36.332µs" I0204 17:44:47.130860 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="31.277µs" I0204 17:44:47.322125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="17.918363ms" I0204 17:44:47.329979 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="7.82042ms" I0204 17:44:47.330040 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="36.536µs" I0204 17:44:47.333235 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="28.489µs" I0204 17:44:47.498084 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" I0204 17:44:47.507498 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:44:47.514779 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:44:47.515023 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:44:47.525619 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:44:48.009813 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:44:48.020966 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:44:50.822363 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="8.072354ms" I0204 17:44:50.822462 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="68.848µs" I0204 17:44:51.826132 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="8.621711ms" I0204 17:44:51.826198 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="38.33µs" I0204 17:44:53.823152 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="45.168µs" I0204 17:44:54.824889 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" 
delay="1s" I0204 17:44:55.831446 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:44:59.834740 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="8.377469ms" I0204 17:44:59.834814 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="46.428µs" I0204 17:45:02.605007 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="16.245095ms" I0204 17:45:02.611061 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="6.028113ms" I0204 17:45:02.611125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="41.278µs" I0204 17:45:02.617127 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="25.058µs" I0204 17:45:04.332362 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:45:04.838757 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:45:05.843313 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="38.403µs" I0204 17:45:05.844534 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:45:05.918246 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:45:06.850192 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:45:06.855317 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0204 17:45:08.852872 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="47.547µs" I0204 17:45:14.647024 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:45:16.871112 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="10.034894ms" I0204 17:45:16.871174 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="36.533µs" I0204 17:45:18.026805 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:45:18.127138 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:45:22.671337 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="19.527178ms" I0204 17:45:22.679991 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="8.626158ms" I0204 17:45:22.680049 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="34.169µs" I0204 17:45:22.683455 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="49.726µs" I0204 17:45:22.847196 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="15.620325ms" I0204 17:45:22.854962 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="7.735335ms" I0204 17:45:22.855015 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="27.697µs" I0204 17:45:22.858154 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="29.126µs" I0204 17:45:26.895238 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="5.729606ms" I0204 17:45:26.895292 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="28.386µs" I0204 17:45:28.899151 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="5.027904ms" I0204 17:45:28.899202 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="26.848µs" E0204 17:45:38.636752 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:38.642171 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:38.652541 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:38.672869 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:38.713763 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:38.794809 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." 
logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:38.955570 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:39.276383 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:39.916942 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:40.828851 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:40.834561 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:40.844885 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:40.866017 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:40.906912 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:40.987374 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:41.148390 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:41.197190 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." 
logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: cda88cb3-fc18-4d7c-96be-a622e423e538]" E0204 17:45:41.469512 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:41.520183 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:41.525755 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:41.536121 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:41.557174 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:41.597841 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:41.677965 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:41.838539 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" E0204 17:45:42.110513 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonPipeline." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonPipeline, namespace: , name: pipeline, uid: a53bb5aa-c5a3-48bf-84f9-fd525ece8604]" E0204 17:45:42.159610 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonInstallerSet." 
logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonInstallerSet, namespace: , name: pipeline-main-static-8dj72, uid: fc304107-e627-408a-8774-f51c486cca9c]" I0204 17:45:45.281335 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:45:48.015731 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="customruns.tekton.dev" I0204 17:45:48.015760 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelineruns.tekton.dev" I0204 17:45:48.015781 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="taskruns.tekton.dev" I0204 17:45:48.015800 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="stepactions.tekton.dev" I0204 17:45:48.015815 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="verificationpolicies.tekton.dev" I0204 17:45:48.015836 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelines.tekton.dev" I0204 17:45:48.015853 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="tasks.tekton.dev" I0204 17:45:48.015871 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="resolutionrequests.resolution.tekton.dev" I0204 17:45:48.015933 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:45:48.136627 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:45:48.998724 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="18.752058ms" I0204 17:45:49.013787 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="15.027682ms" I0204 17:45:49.013855 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="40.788µs" I0204 17:45:49.095056 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="15.847033ms" I0204 17:45:49.102625 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="7.536697ms" I0204 17:45:49.102694 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="47.894µs" I0204 17:45:49.105652 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="35.236µs" I0204 17:45:49.136986 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:45:49.199837 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="15.813359ms" I0204 17:45:49.216388 1 
shared_informer.go:320] Caches are synced for resource quota I0204 17:45:49.216496 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="16.623933ms" I0204 17:45:49.216581 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="59.7µs" I0204 17:45:49.222907 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="52.227µs" I0204 17:45:49.301453 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="16.566575ms" I0204 17:45:49.309306 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="7.821374ms" I0204 17:45:49.309376 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="47.007µs" I0204 17:45:49.315004 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="43.001µs" I0204 17:45:49.442566 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="18.225243ms" I0204 17:45:49.451585 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="8.976548ms" I0204 17:45:49.451658 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="42.446µs" I0204 17:45:49.460029 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="35.047µs" I0204 17:45:52.935110 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="65.463µs" I0204 17:45:54.938667 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="58.44µs" I0204 17:45:55.479122 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:45:57.949908 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="5.538988ms" I0204 17:45:57.949988 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="47.641µs" W0204 17:46:00.310568 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:00.317127 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics 
(1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:46:00.952566 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="61.098µs" W0204 17:46:01.311576 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:01.318878 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:46:03.944445 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="6.786549ms" I0204 17:46:03.944522 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="49.918µs" I0204 17:46:03.966072 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="8.252272ms" I0204 17:46:03.966135 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="34.583µs" I0204 17:46:05.885321 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:46:05.948680 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="6.853057ms" I0204 17:46:05.948750 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="41.576µs" I0204 17:46:11.961796 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="5.675935ms" I0204 17:46:11.961861 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="42.759µs" W0204 17:46:15.322687 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:15.323216 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find 
the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:46:16.198952 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:46:16.321369 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:16.321912 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:46:18.835216 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="14.022944ms" I0204 17:46:18.843270 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="8.018514ms" I0204 17:46:18.843340 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="43.64µs" I0204 17:46:18.853236 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="47.124µs" I0204 17:46:18.934954 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="15.412094ms" I0204 17:46:18.941029 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="6.046776ms" I0204 17:46:18.941083 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="33.297µs" I0204 17:46:18.944268 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="40.756µs" I0204 17:46:19.038769 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="18.973743ms" I0204 17:46:19.046548 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="7.740619ms" I0204 17:46:19.046615 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="39.576µs" I0204 17:46:19.049799 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="46.712µs" I0204 17:46:19.143188 1 shared_informer.go:313] Waiting for caches to sync for garbage collector W0204 17:46:19.219500 1 shared_informer.go:597] resyncPeriod 18h55m19.188691836s is smaller than resyncCheckPeriod 22h0m4.109362255s and the informer has already started. 
Changing it to 22h0m4.109362255s I0204 17:46:19.219537 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="eventlisteners.triggers.tekton.dev" W0204 17:46:19.219571 1 shared_informer.go:597] resyncPeriod 17h22m48.899782164s is smaller than resyncCheckPeriod 22h0m4.109362255s and the informer has already started. Changing it to 22h0m4.109362255s I0204 17:46:19.219586 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggers.triggers.tekton.dev" W0204 17:46:19.219600 1 shared_informer.go:597] resyncPeriod 21h40m22.563835877s is smaller than resyncCheckPeriod 22h0m4.109362255s and the informer has already started. Changing it to 22h0m4.109362255s I0204 17:46:19.219616 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggerbindings.triggers.tekton.dev" W0204 17:46:19.219631 1 shared_informer.go:597] resyncPeriod 16h5m23.6125632s is smaller than resyncCheckPeriod 22h0m4.109362255s and the informer has already started. Changing it to 22h0m4.109362255s I0204 17:46:19.219646 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="interceptors.triggers.tekton.dev" W0204 17:46:19.219663 1 shared_informer.go:597] resyncPeriod 13h4m12.42594104s is smaller than resyncCheckPeriod 22h0m4.109362255s and the informer has already started. Changing it to 22h0m4.109362255s I0204 17:46:19.219680 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggertemplates.triggers.tekton.dev" I0204 17:46:19.219734 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:46:20.220105 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:46:20.244257 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:46:23.000696 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="5.618761ms" I0204 17:46:23.000794 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="68.451µs" I0204 17:46:25.004047 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="5.360139ms" I0204 17:46:25.004110 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="37.737µs" I0204 17:46:28.007639 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="75.051µs" W0204 17:46:30.327043 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:30.327638 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: 
unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:46:31.325321 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:31.325865 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:46:39.020466 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="8.567887ms" I0204 17:46:39.020550 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="54.676µs" I0204 17:46:40.790976 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="17.611261ms" I0204 17:46:40.800397 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="9.386102ms" I0204 17:46:40.800568 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="65.503µs" I0204 17:46:40.800619 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="33.214µs" I0204 17:46:45.047994 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="5.975832ms" I0204 17:46:45.048081 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="58.584µs" W0204 17:46:45.334038 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:45.334613 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:46:46.329334 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:46:46.329859 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:46:46.859396 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:46:47.304068 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="15.503555ms" I0204 17:46:47.308938 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="4.832413ms" I0204 17:46:47.309059 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="55.721µs" I0204 17:46:47.312955 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="32.417µs" I0204 17:46:47.416704 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="11.442405ms" I0204 17:46:47.424460 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="7.723119ms" I0204 17:46:47.424543 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="57.577µs" I0204 17:46:47.433621 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="51.792µs" I0204 17:46:47.525578 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="17.185396ms" I0204 17:46:47.533783 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="8.172524ms" I0204 17:46:47.533870 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="55.238µs" I0204 17:46:47.538820 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="55.312µs" I0204 17:46:52.063326 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="63.143µs" I0204 17:46:55.081365 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="11.194106ms" I0204 17:46:55.081464 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="67.852µs" I0204 17:46:58.083394 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="5.551619ms" I0204 17:46:58.083501 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="48.359µs" W0204 17:47:00.339016 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:00.339829 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:47:01.334902 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:01.335634 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:47:15.348358 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:15.348962 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:47:16.339772 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:16.340354 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:47:17.621697 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="78.73µs" I0204 17:47:17.642548 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="11.725905ms" I0204 
17:47:17.642638 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="56.323µs" I0204 17:47:17.785720 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:47:20.227221 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="extensions.dashboard.tekton.dev" I0204 17:47:20.227280 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:47:20.254214 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:47:21.254475 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:47:21.328307 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:47:21.701368 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="19.600944ms" I0204 17:47:21.709348 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="7.90857ms" I0204 17:47:21.709446 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="73.876µs" I0204 17:47:21.713071 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="49.088µs" I0204 17:47:25.137488 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="83.942µs" I0204 17:47:25.153231 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="7.63527ms" I0204 17:47:25.153303 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="45.759µs" W0204 17:47:30.352921 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:30.353522 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:47:30.489886 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="15.322921ms" I0204 17:47:30.497806 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="7.882647ms" I0204 17:47:30.497884 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="46.021µs" I0204 17:47:30.501497 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="103.115µs" E0204 17:47:31.072234 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-edit\", UID:\"1df1b749-198b-4656-8436-4e1ba58c4306\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-csdtc\", UID:\"eceb6be3-2103-4a26-a298-d0221e5345d7\", Controller:(*bool)(0xc0019bc747), BlockOwnerDeletion:(*bool)(0xc0019bc748)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-edit\" not found" logger="UnhandledError" I0204 17:47:31.161856 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="8.331563ms" I0204 17:47:31.161995 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="57.657µs" I0204 17:47:31.185073 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="15.58677ms" I0204 17:47:31.191794 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="6.694832ms" I0204 17:47:31.191859 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="38.393µs" E0204 17:47:31.272155 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-view\", UID:\"cc33a854-138e-4e08-846a-55157a38d04d\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, 
deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-csdtc\", UID:\"eceb6be3-2103-4a26-a298-d0221e5345d7\", Controller:(*bool)(0xc0019bc880), BlockOwnerDeletion:(*bool)(0xc0019bc881)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-view\" not found" logger="UnhandledError" W0204 17:47:31.379719 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:31.380246 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:47:31.490921 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="61.797µs" E0204 17:47:31.647051 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:47:32.157566 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="67.141µs" I0204 17:47:32.161082 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="45.173µs" I0204 17:47:32.203556 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="4.516889ms" I0204 17:47:32.203622 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="42.189µs" I0204 17:47:32.405068 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="5.247286ms" I0204 17:47:32.405137 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="41.954µs" I0204 17:47:32.522228 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="60.955µs" W0204 17:47:32.901179 1 reflector.go:362] The watchlist 
request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:47:32.901754 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:47:32.902265 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:47:32.902290 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:47:33.158707 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="61.679µs" I0204 17:47:33.162548 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="62.084µs" I0204 17:47:33.169765 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="4.905µs" I0204 17:47:33.432083 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="9.515295ms" I0204 17:47:33.436746 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="4.633773ms" I0204 17:47:33.436808 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="39.108µs" I0204 17:47:33.441076 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="76.616µs" I0204 17:47:34.165490 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="5.080475ms" I0204 17:47:34.165564 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="46.693µs" I0204 17:47:34.179914 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="9.460265ms" I0204 17:47:34.184588 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="4.636807ms" I0204 17:47:34.184653 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="42.867µs" I0204 17:47:34.305751 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="86.597µs" I0204 17:47:34.663190 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" 
duration="11.620154ms" I0204 17:47:34.668267 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="5.043627ms" I0204 17:47:34.668321 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="32.208µs" I0204 17:47:34.675108 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="29.977µs" I0204 17:47:34.842815 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="12.337118ms" I0204 17:47:34.855193 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="12.345407ms" I0204 17:47:34.855250 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="34.76µs" I0204 17:47:34.861021 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="32.593µs" I0204 17:47:35.030748 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="18.934862ms" I0204 17:47:35.039250 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="8.472424ms" I0204 17:47:35.039307 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="38.456µs" I0204 17:47:35.050127 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="29.287µs" I0204 17:47:35.167124 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="66.584µs" I0204 17:47:35.179207 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="43.394µs" W0204 17:47:35.784095 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:47:35.784639 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:47:35.785041 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:47:35.785061 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:47:37.535949 1 
job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="0s" I0204 17:47:37.541259 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:47:37.549305 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:47:37.549371 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:47:37.559829 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:47:39.048271 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="11.965238ms" I0204 17:47:39.058203 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="9.902818ms" I0204 17:47:39.058251 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="25.902µs" I0204 17:47:39.177831 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="52.343µs" I0204 17:47:40.191532 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="8.066904ms" I0204 17:47:40.191595 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="37.275µs" I0204 17:47:40.810277 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="16.801317ms" I0204 17:47:40.816334 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="6.028853ms" I0204 17:47:40.816388 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="26.472µs" I0204 17:47:40.822561 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="21.297µs" W0204 17:47:40.823158 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:47:40.823747 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:47:40.824209 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:47:40.824230 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:47:42.189025 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" 
kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="59.724µs" I0204 17:47:45.207592 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="6.034626ms" I0204 17:47:45.207674 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="51.999µs" W0204 17:47:45.360979 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:45.361528 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:47:46.384791 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:47:46.385343 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:47:47.698692 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:47:47.699219 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:47:47.699725 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:47:47.699746 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:47:48.577562 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:47:51.261030 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:47:51.333691 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="repositories.pipelinesascode.tekton.dev" I0204 17:47:51.333730 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:47:52.434173 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:47:52.461544 1 shared_informer.go:320] Caches 
are synced for garbage collector I0204 17:47:53.204516 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="8.930494ms" I0204 17:47:53.204599 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="54.756µs" I0204 17:47:58.746499 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:47:59.235935 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:00.243979 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0204 17:48:00.366514 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:00.367085 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:48:01.389964 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:01.390570 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:48:03.245148 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="42.073µs" I0204 17:48:04.249256 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:04.255764 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="45.27µs" I0204 17:48:05.257696 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:05.265721 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="8.119407ms" I0204 17:48:05.265769 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:05.265777 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="28.122µs" I0204 17:48:05.917709 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="smee-client/gosmee-client-6796cf9679" duration="18.237541ms" I0204 17:48:05.925313 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="7.56764ms" I0204 17:48:05.925362 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="25.831µs" I0204 17:48:05.934505 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="23.79µs" I0204 17:48:06.266330 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:06.270648 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="8.11941ms" I0204 17:48:06.270701 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-7799bb6ddd" duration="28.244µs" W0204 17:48:08.644564 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:48:08.645165 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:48:08.645785 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:48:08.645809 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:48:13.283437 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="5.123768ms" I0204 17:48:13.283492 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="29.899µs" I0204 17:48:14.093961 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="13.90471ms" I0204 17:48:14.099632 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="5.644299ms" I0204 17:48:14.099698 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="41.958µs" I0204 17:48:14.103905 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="44.037µs" I0204 17:48:14.158757 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="10.891493ms" I0204 17:48:14.166005 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="7.223014ms" I0204 17:48:14.166058 1 replica_set.go:679] "Finished 
syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="30.984µs" I0204 17:48:14.173982 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="39.922µs" I0204 17:48:14.234987 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="15.608279ms" I0204 17:48:14.235040 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="30.092µs" I0204 17:48:14.297931 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="8.789817ms" I0204 17:48:14.297983 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="33.109µs" W0204 17:48:15.374130 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:15.374820 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:48:16.395316 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:16.395919 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:48:18.296893 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="60.015µs" I0204 17:48:19.151145 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:48:19.302171 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="77.848µs" I0204 17:48:20.308204 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:20.330678 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="8.862775ms" I0204 17:48:20.330736 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="34.997µs" I0204 
17:48:21.311145 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:21.315138 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:21.425061 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:22.316242 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="67.433µs" I0204 17:48:22.317830 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:22.323007 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0204 17:48:22.438476 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="updaterequests.kyverno.io" I0204 17:48:22.438502 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespaceddeletingpolicies.policies.kyverno.io" I0204 17:48:22.438515 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedgeneratingpolicies.policies.kyverno.io" I0204 17:48:22.438532 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedmutatingpolicies.policies.kyverno.io" I0204 17:48:22.438546 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="connectors.dex.coreos.com" I0204 17:48:22.438561 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="oauth2clients.dex.coreos.com" I0204 17:48:22.438576 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.kyverno.io" I0204 17:48:22.438589 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedimagevalidatingpolicies.policies.kyverno.io" I0204 17:48:22.438602 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authrequests.dex.coreos.com" I0204 17:48:22.438617 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyreports.wgpolicyk8s.io" I0204 17:48:22.438630 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cleanuppolicies.kyverno.io" I0204 17:48:22.438640 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="signingkeies.dex.coreos.com" I0204 17:48:22.438653 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="refreshtokens.dex.coreos.com" I0204 17:48:22.438663 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ephemeralreports.reports.kyverno.io" I0204 17:48:22.438676 1 resource_quota_monitor.go:227] "QuotaMonitor created object count 
evaluator" logger="resourcequota-controller" resource="devicerequests.dex.coreos.com" I0204 17:48:22.438684 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policies.kyverno.io" I0204 17:48:22.438708 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authcodes.dex.coreos.com" I0204 17:48:22.438722 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="passwords.dex.coreos.com" I0204 17:48:22.438734 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicetokens.dex.coreos.com" I0204 17:48:22.438746 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.policies.kyverno.io" I0204 17:48:22.438754 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedvalidatingpolicies.policies.kyverno.io" I0204 17:48:22.438767 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="offlinesessionses.dex.coreos.com" I0204 17:48:22.438924 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:48:22.472200 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:48:23.539131 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:48:23.672807 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:48:26.411639 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="69.547µs" I0204 17:48:27.341773 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="5.855383ms" I0204 17:48:27.341846 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="44.559µs" W0204 17:48:30.379240 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:30.379892 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:48:31.400911 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:31.401542 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: 
unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:48:39.701433 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:48:45.388308 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:45.388878 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:48:46.405447 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:48:46.405990 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:48:50.080128 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:48:53.373208 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:48:53.373790 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:48:53.374379 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:48:53.374401 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:48:53.544212 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releases.appstudio.redhat.com" I0204 17:48:53.544242 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplanadmissions.appstudio.redhat.com" I0204 17:48:53.544254 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshotenvironmentbindings.appstudio.redhat.com" I0204 17:48:53.544275 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" 
resource="environments.appstudio.redhat.com" I0204 17:48:53.544291 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalservicesconfigs.appstudio.redhat.com" I0204 17:48:53.544310 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentdetectionqueries.appstudio.redhat.com" I0204 17:48:53.544328 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargetclaims.appstudio.redhat.com" I0204 17:48:53.544352 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="applications.appstudio.redhat.com" I0204 17:48:53.544374 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="components.appstudio.redhat.com" I0204 17:48:53.544389 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalrequests.appstudio.redhat.com" I0204 17:48:53.544431 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="enterprisecontractpolicies.appstudio.redhat.com" I0204 17:48:53.544454 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargets.appstudio.redhat.com" I0204 17:48:53.544474 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseserviceconfigs.appstudio.redhat.com" I0204 17:48:53.544488 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="promotionruns.appstudio.redhat.com" I0204 17:48:53.544514 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshots.appstudio.redhat.com" I0204 17:48:53.544530 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplans.appstudio.redhat.com" I0204 17:48:53.544729 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:48:53.683391 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:48:54.628008 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5c6bd754f" duration="19.91243ms" I0204 17:48:54.635944 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5c6bd754f" duration="7.82404ms" I0204 17:48:54.636004 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5c6bd754f" duration="34.282µs" I0204 17:48:54.645219 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5c6bd754f" duration="26.36µs" I0204 17:48:54.745477 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:48:54.783877 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:48:59.407745 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="release-service/release-service-controller-manager-5c6bd754f" duration="70.403µs" W0204 17:49:00.393512 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:00.394142 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:49:01.410254 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:01.410848 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:49:06.655696 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="19.432367ms" I0204 17:49:06.664156 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="8.423587ms" I0204 17:49:06.664239 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="51.925µs" I0204 17:49:06.670890 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="41.943µs" I0204 17:49:10.417897 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5c6bd754f" duration="6.332729ms" I0204 17:49:10.417975 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5c6bd754f" duration="42.401µs" I0204 17:49:10.436472 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="64.089µs" W0204 17:49:15.401899 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:15.402466 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: 
the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:49:16.415355 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:16.416015 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:49:18.024592 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="19.387584ms" I0204 17:49:18.032166 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="7.542971ms" I0204 17:49:18.032226 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="36.8µs" I0204 17:49:18.041388 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="34.78µs" I0204 17:49:20.170233 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:49:20.812876 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="31.95925ms" I0204 17:49:20.820977 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="8.063434ms" I0204 17:49:20.821041 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="31.697µs" I0204 17:49:20.825575 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="36.886µs" I0204 17:49:21.457650 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="9.008852ms" I0204 17:49:21.457713 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-57d7866648" duration="35.052µs" I0204 17:49:24.123664 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="17.88834ms" I0204 17:49:24.135623 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="11.917676ms" I0204 17:49:24.135728 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="70.622µs" I0204 17:49:24.474981 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="42.141µs" I0204 17:49:24.750606 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="integrationtestscenarios.appstudio.redhat.com" I0204 17:49:24.750650 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentgroups.appstudio.redhat.com" I0204 17:49:24.750723 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:49:24.791965 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:49:25.792690 1 shared_informer.go:320] Caches are synced for garbage collector I0204 17:49:25.851626 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:49:26.483497 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="46.418µs" I0204 17:49:26.500349 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="8.146183ms" I0204 17:49:26.500439 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="60.661µs" W0204 17:49:30.691817 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:30.692475 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:49:31.419997 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:31.420637 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:49:33.614744 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="71.498µs" I0204 17:49:34.511124 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="58.576µs" I0204 17:49:35.488774 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="8.859767ms" I0204 17:49:35.488842 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="integration-service/integration-service-controller-manager-5f4b6cc8bb" duration="34.425µs" W0204 17:49:37.272984 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:49:37.273610 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:49:37.274176 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:49:37.274198 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:49:39.528139 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="74.46µs" I0204 17:49:40.645396 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:49:41.534888 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="60.315µs" W0204 17:49:45.700856 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:45.701527 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:49:46.424629 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:49:46.425218 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:49:48.561360 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="59.028µs" I0204 17:49:50.868193 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:50:00.705921 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:00.706543 1 horizontal.go:275] "Unhandled Error" 
err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:50:01.428793 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:01.429353 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:50:06.034765 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" W0204 17:50:15.714746 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:15.715299 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:50:16.433606 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:16.434165 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:50:16.647464 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:50:16.648164 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:50:16.648747 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:50:16.648776 1 reflector.go:166] "Unhandled Error" 
err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:50:19.574806 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="8.515055ms" I0204 17:50:19.574878 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="46.546µs" I0204 17:50:21.374225 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:50:30.719962 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:30.720591 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:50:31.438212 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:31.438754 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:50:31.596147 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:50:45.728908 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:45.729465 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:50:46.442992 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:50:46.443537 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get 
metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:50:49.583638 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="8.640676ms" I0204 17:50:49.583812 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7bbcd85f46" duration="59.029µs" I0204 17:50:58.292197 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="21.450451ms" I0204 17:50:58.300274 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="8.040821ms" I0204 17:50:58.300328 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="27.823µs" I0204 17:50:58.309845 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="37.105µs" W0204 17:51:00.733993 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:00.734597 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:51:01.447995 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:01.448563 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:51:08.759432 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="58.475µs" I0204 17:51:12.503503 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:51:14.105884 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:51:14.106577 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" 
resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:51:14.107122 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:51:14.107143 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:51:15.743531 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:15.744149 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:51:16.453080 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:16.453663 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:51:19.773147 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="8.528318ms" I0204 17:51:19.773217 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-76dd6c864" duration="37.617µs" I0204 17:51:21.092597 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b5bd8d655" duration="19.749145ms" I0204 17:51:21.100662 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b5bd8d655" duration="8.027475ms" I0204 17:51:21.100734 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b5bd8d655" duration="41.047µs" I0204 17:51:21.100775 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b5bd8d655" duration="22.599µs" I0204 17:51:21.108235 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b5bd8d655" duration="29.308µs" I0204 17:51:22.885586 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:51:23.811429 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="smee-client/gosmee-client-6b5bd8d655" duration="6.247628ms" I0204 17:51:23.811512 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6b5bd8d655" duration="39.294µs" I0204 17:51:23.833798 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="15.88259ms" I0204 17:51:23.841729 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="7.903858ms" I0204 17:51:23.841787 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="30.203µs" I0204 17:51:24.281615 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="43.022µs" I0204 17:51:24.807666 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="48.493µs" I0204 17:51:24.811514 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6796cf9679" duration="38.608µs" I0204 17:51:25.816159 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0204 17:51:25.869391 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="imagerepositories.appstudio.redhat.com" I0204 17:51:25.869474 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0204 17:51:25.869507 1 shared_informer.go:320] Caches are synced for resource quota I0204 17:51:25.916912 1 shared_informer.go:320] Caches are synced for garbage collector W0204 17:51:30.748160 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:30.748814 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:51:31.458199 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:31.458771 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:51:33.145010 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:51:45.758242 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST 
semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:45.758852 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:51:46.463005 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:51:46.463615 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:51:59.207501 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:51:59.208134 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:51:59.208712 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:51:59.208739 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:52:00.763328 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:00.763977 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:01.467844 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:01.468429 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu 
utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:15.772486 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:15.773046 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:16.472690 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:16.473230 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:30.777776 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:30.778400 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:31.477424 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:31.477955 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:41.863865 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:52:41.864495 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" 
resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:52:41.864978 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:52:41.864999 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:52:45.786657 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:45.787175 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:52:46.481917 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:52:46.482441 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:53:00.791890 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:00.792548 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:53:01.487037 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:01.487610 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:53:05.134771 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" 
key="kind-mapt-control-plane" I0204 17:53:15.229632 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:53:15.800525 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:15.801124 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:53:16.492198 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:16.492832 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:53:17.695086 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:53:17.695701 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:53:17.696247 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:53:17.696270 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:53:25.424011 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:53:30.805799 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:30.806383 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:53:31.496449 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard 
LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:31.496977 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:53:35.441817 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:53:45.534897 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:53:45.815678 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:45.816288 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:53:46.501432 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:53:46.501993 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:54:00.820543 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:00.821150 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:54:01.507022 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:01.507564 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric 
value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:54:09.635552 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:54:09.636170 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:54:09.636700 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:54:09.636720 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:54:15.829188 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:15.829756 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:54:16.019435 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:54:16.511295 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:16.511847 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:54:26.240557 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:54:30.833813 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:30.834444 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could 
not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:54:31.515483 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:31.516087 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:54:45.842485 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:45.843073 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:54:46.520608 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:54:46.521170 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:00.847709 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:00.848367 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:01.525138 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:01.525692 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics 
for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:08.365275 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:55:08.365906 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:55:08.366461 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:55:08.366485 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:55:15.857628 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:15.858208 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:16.529904 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:16.530495 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:30.863644 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:30.864283 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:31.534957 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:31.535583 1 
horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:45.875114 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:45.875725 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:55:46.540345 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:55:46.540952 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:55:48.198317 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0204 17:55:58.494678 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:55:59.446059 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:55:59.446705 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:55:59.447291 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:55:59.447317 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:56:00.880299 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:00.880937 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:56:01.544913 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:01.545539 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:56:08.581478 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:56:15.888827 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:15.889427 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:56:16.549611 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:16.550169 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:56:29.129134 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:56:30.893915 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:30.894565 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 
17:56:31.553874 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:31.554483 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:56:39.219740 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:56:45.902730 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:45.903350 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:56:46.558219 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:56:46.558838 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:56:47.689313 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:56:47.689974 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:56:47.690541 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:56:47.690567 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0204 17:56:59.984452 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:57:00.908687 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the 
server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:00.909329 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:57:01.562650 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:01.563222 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0204 17:57:10.363130 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0204 17:57:15.918208 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:15.918884 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:57:16.567725 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:16.568275 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:57:18.049031 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:57:18.049686 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:57:18.050181 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find 
the requested resource E0204 17:57:18.050207 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:57:30.922735 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:30.923268 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:57:31.572371 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:31.572944 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:57:45.935075 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:45.935752 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:57:46.577066 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:57:46.577728 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:58:00.940589 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:58:00.941280 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on 
listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:58:01.581715 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:58:01.582347 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:58:11.592144 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0204 17:58:11.592825 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0204 17:58:11.593392 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0204 17:58:11.593443 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0204 17:58:15.951095 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:58:15.951746 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0204 17:58:16.586782 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0204 17:58:16.587346 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"