I0213 18:01:27.630965 1 serving.go:386] Generated self-signed cert in-memory I0213 18:01:27.847429 1 controllermanager.go:185] "Starting" version="v1.32.5" I0213 18:01:27.847448 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" I0213 18:01:27.848546 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/pki/front-proxy-ca.crt" I0213 18:01:27.848593 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/pki/ca.crt" I0213 18:01:27.848669 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257 I0213 18:01:27.848686 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" I0213 18:01:27.848859 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager... E0213 18:01:29.541512 1 leaderelection.go:436] error retrieving resource lock kube-system/kube-controller-manager: leases.coordination.k8s.io "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-system" I0213 18:01:32.598852 1 leaderelection.go:271] successfully acquired lease kube-system/kube-controller-manager I0213 18:01:32.599023 1 event.go:389] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="kind-mapt-control-plane_0cf1a5ff-2c14-48a9-bdbc-bbef3297d3e9 became leader" I0213 18:01:32.600293 1 controllermanager.go:765] "Started controller" controller="serviceaccount-token-controller" I0213 18:01:32.600354 1 shared_informer.go:313] Waiting for caches to sync for tokens I0213 18:01:32.614715 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-approving-controller" I0213 18:01:32.614778 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-approving-controller" name="csrapproving" I0213 18:01:32.614798 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrapproving I0213 18:01:32.615884 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-cleaner-controller" I0213 18:01:32.615966 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller" I0213 18:01:32.630656 1 controllermanager.go:765] "Started controller" controller="namespace-controller" I0213 18:01:32.630732 1 namespace_controller.go:202] "Starting namespace controller" logger="namespace-controller" I0213 18:01:32.630746 1 shared_informer.go:313] Waiting for caches to sync for namespace I0213 18:01:32.637009 1 controllermanager.go:765] "Started controller" controller="replicaset-controller" I0213 18:01:32.637126 1 replica_set.go:217] "Starting controller" logger="replicaset-controller" name="replicaset" I0213 18:01:32.637140 1 shared_informer.go:313] Waiting for caches to sync for ReplicaSet I0213 18:01:32.644132 1 controllermanager.go:765] "Started controller" controller="disruption-controller" I0213 18:01:32.644222 1 disruption.go:452] "Sending events to api server." 
logger="disruption-controller" I0213 18:01:32.644253 1 disruption.go:463] "Starting disruption controller" logger="disruption-controller" I0213 18:01:32.644261 1 shared_informer.go:313] Waiting for caches to sync for disruption I0213 18:01:32.650266 1 controllermanager.go:765] "Started controller" controller="statefulset-controller" I0213 18:01:32.650276 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="node-route-controller" I0213 18:01:32.650394 1 stateful_set.go:166] "Starting stateful set controller" logger="statefulset-controller" I0213 18:01:32.650408 1 shared_informer.go:313] Waiting for caches to sync for stateful set I0213 18:01:32.662988 1 controllermanager.go:765] "Started controller" controller="persistentvolume-expander-controller" I0213 18:01:32.663092 1 expand_controller.go:329] "Starting expand controller" logger="persistentvolume-expander-controller" I0213 18:01:32.663106 1 shared_informer.go:313] Waiting for caches to sync for expand I0213 18:01:32.668949 1 controllermanager.go:765] "Started controller" controller="persistentvolumeclaim-protection-controller" I0213 18:01:32.668962 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="resourceclaim-controller" requiredFeatureGates=["DynamicResourceAllocation"] I0213 18:01:32.669056 1 pvc_protection_controller.go:168] "Starting PVC protection controller" logger="persistentvolumeclaim-protection-controller" I0213 18:01:32.669067 1 shared_informer.go:313] Waiting for caches to sync for PVC protection I0213 18:01:32.675219 1 controllermanager.go:765] "Started controller" controller="job-controller" I0213 18:01:32.675231 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="selinux-warning-controller" requiredFeatureGates=["SELinuxChangePolicy"] I0213 18:01:32.675259 1 job_controller.go:243] "Starting job controller" logger="job-controller" I0213 18:01:32.675270 1 shared_informer.go:313] Waiting for caches to sync for job I0213 18:01:32.701187 1 shared_informer.go:320] Caches are synced for tokens I0213 18:01:32.704434 1 controllermanager.go:765] "Started controller" controller="pod-garbage-collector-controller" I0213 18:01:32.704448 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"] I0213 18:01:32.704553 1 gc_controller.go:99] "Starting GC controller" logger="pod-garbage-collector-controller" I0213 18:01:32.704570 1 shared_informer.go:313] Waiting for caches to sync for GC I0213 18:01:32.901541 1 controllermanager.go:765] "Started controller" controller="validatingadmissionpolicy-status-controller" I0213 18:01:32.901597 1 shared_informer.go:313] Waiting for caches to sync for validatingadmissionpolicy-status I0213 18:01:33.054866 1 controllermanager.go:765] "Started controller" controller="endpoints-controller" I0213 18:01:33.054960 1 endpoints_controller.go:182] "Starting endpoint controller" logger="endpoints-controller" I0213 18:01:33.054980 1 shared_informer.go:313] Waiting for caches to sync for endpoint I0213 18:01:33.204778 1 controllermanager.go:765] "Started controller" controller="daemonset-controller" I0213 18:01:33.204859 1 daemon_controller.go:294] "Starting daemon sets controller" logger="daemonset-controller" I0213 18:01:33.204871 1 shared_informer.go:313] Waiting for caches to sync for daemon sets I0213 18:01:33.501495 1 controllermanager.go:765] "Started controller" 
controller="horizontal-pod-autoscaler-controller" I0213 18:01:33.501539 1 horizontal.go:201] "Starting HPA controller" logger="horizontal-pod-autoscaler-controller" I0213 18:01:33.501554 1 shared_informer.go:313] Waiting for caches to sync for HPA I0213 18:01:33.654667 1 controllermanager.go:765] "Started controller" controller="token-cleaner-controller" I0213 18:01:33.654682 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="service-lb-controller" I0213 18:01:33.654691 1 controllermanager.go:743] "Warning: skipping controller" controller="storage-version-migrator-controller" I0213 18:01:33.654737 1 tokencleaner.go:117] "Starting token cleaner controller" logger="token-cleaner-controller" I0213 18:01:33.654748 1 shared_informer.go:313] Waiting for caches to sync for token_cleaner I0213 18:01:33.654755 1 shared_informer.go:320] Caches are synced for token_cleaner I0213 18:01:33.841143 1 controllermanager.go:765] "Started controller" controller="endpointslice-controller" I0213 18:01:33.841229 1 endpointslice_controller.go:281] "Starting endpoint slice controller" logger="endpointslice-controller" I0213 18:01:33.841241 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice I0213 18:01:34.051540 1 controllermanager.go:765] "Started controller" controller="garbage-collector-controller" I0213 18:01:34.051570 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector" I0213 18:01:34.051591 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:01:34.051615 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder" I0213 18:01:34.404715 1 range_allocator.go:112] "No Secondary Service CIDR provided. 
Skipping filtering out secondary service addresses" logger="node-ipam-controller" I0213 18:01:34.404746 1 controllermanager.go:765] "Started controller" controller="node-ipam-controller" I0213 18:01:34.404824 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller" I0213 18:01:34.404834 1 shared_informer.go:313] Waiting for caches to sync for node I0213 18:01:34.554007 1 controllermanager.go:765] "Started controller" controller="ttl-after-finished-controller" I0213 18:01:34.554030 1 ttlafterfinished_controller.go:112] "Starting TTL after finished controller" logger="ttl-after-finished-controller" I0213 18:01:34.554046 1 shared_informer.go:313] Waiting for caches to sync for TTL after finished I0213 18:01:34.703336 1 controllermanager.go:765] "Started controller" controller="root-ca-certificate-publisher-controller" I0213 18:01:34.703353 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="service-cidr-controller" requiredFeatureGates=["MultiCIDRServiceAllocator"] I0213 18:01:34.703397 1 publisher.go:107] "Starting root CA cert publisher controller" logger="root-ca-certificate-publisher-controller" I0213 18:01:34.703407 1 shared_informer.go:313] Waiting for caches to sync for crt configmap I0213 18:01:34.853421 1 controllermanager.go:765] "Started controller" controller="endpointslice-mirroring-controller" I0213 18:01:34.853491 1 endpointslicemirroring_controller.go:227] "Starting EndpointSliceMirroring controller" logger="endpointslice-mirroring-controller" I0213 18:01:34.853502 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice_mirroring I0213 18:01:35.004622 1 controllermanager.go:765] "Started controller" controller="bootstrap-signer-controller" I0213 18:01:35.004680 1 shared_informer.go:313] Waiting for caches to sync for bootstrap_signer I0213 18:01:35.153440 1 controllermanager.go:765] "Started controller" controller="persistentvolume-protection-controller" I0213 18:01:35.153491 1 pv_protection_controller.go:81] "Starting PV protection controller" logger="persistentvolume-protection-controller" I0213 18:01:35.153501 1 shared_informer.go:313] Waiting for caches to sync for PV protection I0213 18:01:35.303651 1 controllermanager.go:765] "Started controller" controller="ephemeral-volume-controller" I0213 18:01:35.303694 1 controller.go:173] "Starting ephemeral volume controller" logger="ephemeral-volume-controller" I0213 18:01:35.303701 1 shared_informer.go:313] Waiting for caches to sync for ephemeral I0213 18:01:35.454644 1 controllermanager.go:765] "Started controller" controller="replicationcontroller-controller" I0213 18:01:35.454702 1 replica_set.go:217] "Starting controller" logger="replicationcontroller-controller" name="replicationcontroller" I0213 18:01:35.454715 1 shared_informer.go:313] Waiting for caches to sync for ReplicationController I0213 18:01:35.604944 1 controllermanager.go:765] "Started controller" controller="serviceaccount-controller" I0213 18:01:35.604991 1 serviceaccounts_controller.go:114] "Starting service account controller" logger="serviceaccount-controller" I0213 18:01:35.605001 1 shared_informer.go:313] Waiting for caches to sync for service account I0213 18:01:35.755139 1 controllermanager.go:765] "Started controller" controller="deployment-controller" I0213 18:01:35.755563 1 deployment_controller.go:173] "Starting controller" logger="deployment-controller" controller="deployment" I0213 18:01:35.755582 1 shared_informer.go:313] Waiting for caches to sync for deployment 
I0213 18:01:35.905158 1 controllermanager.go:765] "Started controller" controller="cronjob-controller" I0213 18:01:35.905212 1 cronjob_controllerv2.go:145] "Starting cronjob controller v2" logger="cronjob-controller" I0213 18:01:35.905228 1 shared_informer.go:313] Waiting for caches to sync for cronjob I0213 18:01:36.055052 1 controllermanager.go:765] "Started controller" controller="ttl-controller" I0213 18:01:36.055084 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="cloud-node-lifecycle-controller" I0213 18:01:36.055172 1 ttl_controller.go:127] "Starting TTL controller" logger="ttl-controller" I0213 18:01:36.055185 1 shared_informer.go:313] Waiting for caches to sync for TTL I0213 18:01:36.205116 1 controllermanager.go:765] "Started controller" controller="persistentvolume-attach-detach-controller" I0213 18:01:36.205183 1 attach_detach_controller.go:338] "Starting attach detach controller" logger="persistentvolume-attach-detach-controller" I0213 18:01:36.205201 1 shared_informer.go:313] Waiting for caches to sync for attach detach I0213 18:01:36.354839 1 controllermanager.go:765] "Started controller" controller="clusterrole-aggregation-controller" I0213 18:01:36.354883 1 clusterroleaggregation_controller.go:194] "Starting ClusterRoleAggregator controller" logger="clusterrole-aggregation-controller" I0213 18:01:36.354892 1 shared_informer.go:313] Waiting for caches to sync for ClusterRoleAggregator I0213 18:01:36.654713 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="daemonsets.apps" I0213 18:01:36.654756 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps" I0213 18:01:36.654775 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch" W0213 18:01:36.654788 1 shared_informer.go:597] resyncPeriod 13h41m50.398761943s is smaller than resyncCheckPeriod 18h18m47.480367252s and the informer has already started. Changing it to 18h18m47.480367252s I0213 18:01:36.654863 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io" I0213 18:01:36.654883 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io" I0213 18:01:36.654925 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy" I0213 18:01:36.654948 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io" I0213 18:01:36.654989 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch" W0213 18:01:36.654998 1 shared_informer.go:597] resyncPeriod 15h38m19.727774611s is smaller than resyncCheckPeriod 18h18m47.480367252s and the informer has already started. 
Changing it to 18h18m47.480367252s I0213 18:01:36.655056 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts" I0213 18:01:36.655125 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges" I0213 18:01:36.655166 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io" I0213 18:01:36.655194 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io" I0213 18:01:36.655212 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io" I0213 18:01:36.655232 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints" I0213 18:01:36.655250 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates" I0213 18:01:36.655275 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps" I0213 18:01:36.655313 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling" I0213 18:01:36.655332 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps" I0213 18:01:36.655367 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io" I0213 18:01:36.655389 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="statefulsets.apps" I0213 18:01:36.655413 1 controllermanager.go:765] "Started controller" controller="resourcequota-controller" I0213 18:01:36.655430 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller" I0213 18:01:36.655445 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:01:36.655426 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"] I0213 18:01:36.655491 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller" I0213 18:01:36.701380 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving" I0213 18:01:36.701400 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0213 18:01:36.701404 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-serving I0213 18:01:36.701615 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client" I0213 18:01:36.701628 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-client I0213 18:01:36.701645 1 dynamic_serving_content.go:135] "Starting controller" 
name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0213 18:01:36.701804 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client" I0213 18:01:36.701815 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kube-apiserver-client I0213 18:01:36.701826 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0213 18:01:36.701912 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-signing-controller" I0213 18:01:36.701956 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown" I0213 18:01:36.701965 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-legacy-unknown I0213 18:01:36.701977 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0213 18:01:36.751182 1 node_lifecycle_controller.go:432] "Controller will reconcile labels" logger="node-lifecycle-controller" I0213 18:01:36.751236 1 controllermanager.go:765] "Started controller" controller="node-lifecycle-controller" I0213 18:01:36.751304 1 node_lifecycle_controller.go:466] "Sending events to api server" logger="node-lifecycle-controller" I0213 18:01:36.751318 1 node_lifecycle_controller.go:477] "Starting node controller" logger="node-lifecycle-controller" I0213 18:01:36.751325 1 shared_informer.go:313] Waiting for caches to sync for taint I0213 18:01:36.905058 1 controllermanager.go:765] "Started controller" controller="persistentvolume-binder-controller" I0213 18:01:36.905097 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="volumeattributesclass-protection-controller" requiredFeatureGates=["VolumeAttributesClass"] I0213 18:01:36.905161 1 pv_controller_base.go:308] "Starting persistent volume controller" logger="persistentvolume-binder-controller" I0213 18:01:36.905174 1 shared_informer.go:313] Waiting for caches to sync for persistent volume I0213 18:01:37.055042 1 controllermanager.go:765] "Started controller" controller="legacy-serviceaccount-token-cleaner-controller" I0213 18:01:37.055104 1 legacy_serviceaccount_token_cleaner.go:103] "Starting legacy service account token cleaner controller" logger="legacy-serviceaccount-token-cleaner-controller" I0213 18:01:37.055112 1 shared_informer.go:313] Waiting for caches to sync for legacy-service-account-token-cleaner I0213 18:01:37.101195 1 controllermanager.go:765] "Started controller" controller="taint-eviction-controller" I0213 18:01:37.101261 1 taint_eviction.go:281] "Starting" logger="taint-eviction-controller" controller="taint-eviction-controller" I0213 18:01:37.101307 1 taint_eviction.go:287] "Sending events to api server" logger="taint-eviction-controller" I0213 18:01:37.101329 1 shared_informer.go:313] Waiting for caches to sync for taint-eviction-controller I0213 18:01:37.103397 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:01:37.107634 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"kind-mapt-control-plane\" does not exist" I0213 18:01:37.109764 1 shared_informer.go:313] 
Waiting for caches to sync for garbage collector I0213 18:01:37.115040 1 shared_informer.go:320] Caches are synced for certificate-csrapproving I0213 18:01:37.131472 1 shared_informer.go:320] Caches are synced for namespace I0213 18:01:37.137711 1 shared_informer.go:320] Caches are synced for ReplicaSet I0213 18:01:37.141891 1 shared_informer.go:320] Caches are synced for endpoint_slice I0213 18:01:37.145212 1 shared_informer.go:320] Caches are synced for disruption I0213 18:01:37.150451 1 shared_informer.go:320] Caches are synced for stateful set I0213 18:01:37.151606 1 shared_informer.go:320] Caches are synced for taint I0213 18:01:37.151671 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone="" I0213 18:01:37.151754 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="kind-mapt-control-plane" I0213 18:01:37.151776 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:01:37.151784 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller" I0213 18:01:37.151789 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller" I0213 18:01:37.151794 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller" I0213 18:01:37.153692 1 shared_informer.go:320] Caches are synced for PV protection I0213 18:01:37.153788 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring I0213 18:01:37.155002 1 shared_informer.go:320] Caches are synced for ReplicationController I0213 18:01:37.155018 1 shared_informer.go:320] Caches are synced for ClusterRoleAggregator I0213 18:01:37.155030 1 shared_informer.go:320] Caches are synced for TTL after finished I0213 18:01:37.155105 1 shared_informer.go:320] Caches are synced for endpoint I0213 18:01:37.155139 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner I0213 18:01:37.155487 1 shared_informer.go:320] Caches are synced for TTL I0213 18:01:37.155607 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:01:37.155849 1 shared_informer.go:320] Caches are synced for deployment I0213 18:01:37.163431 1 shared_informer.go:320] Caches are synced for expand I0213 18:01:37.169680 1 shared_informer.go:320] Caches are synced for PVC protection I0213 18:01:37.175944 1 shared_informer.go:320] Caches are synced for job I0213 18:01:37.201808 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-serving I0213 18:01:37.201838 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status I0213 18:01:37.201823 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-client I0213 18:01:37.201842 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kube-apiserver-client I0213 18:01:37.201853 1 shared_informer.go:320] Caches are synced for taint-eviction-controller I0213 18:01:37.201899 1 shared_informer.go:320] Caches are synced for HPA I0213 18:01:37.202055 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-legacy-unknown I0213 18:01:37.204235 1 shared_informer.go:320] Caches are synced for crt configmap I0213 18:01:37.204244 1 shared_informer.go:320] Caches are synced for ephemeral I0213 18:01:37.204292 1 shared_informer.go:320] Caches are synced for resource 
quota I0213 18:01:37.205355 1 shared_informer.go:320] Caches are synced for persistent volume I0213 18:01:37.205363 1 shared_informer.go:320] Caches are synced for GC I0213 18:01:37.205397 1 shared_informer.go:320] Caches are synced for cronjob I0213 18:01:37.205406 1 shared_informer.go:320] Caches are synced for service account I0213 18:01:37.205412 1 shared_informer.go:320] Caches are synced for attach detach I0213 18:01:37.205398 1 shared_informer.go:320] Caches are synced for daemon sets I0213 18:01:37.205480 1 shared_informer.go:320] Caches are synced for node I0213 18:01:37.205506 1 shared_informer.go:320] Caches are synced for bootstrap_signer I0213 18:01:37.205523 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller" I0213 18:01:37.205560 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller" I0213 18:01:37.205566 1 shared_informer.go:313] Waiting for caches to sync for cidrallocator I0213 18:01:37.205570 1 shared_informer.go:320] Caches are synced for cidrallocator I0213 18:01:37.210712 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:01:37.213830 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="kind-mapt-control-plane" podCIDRs=["10.244.0.0/24"] I0213 18:01:37.213849 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:01:37.213870 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:01:38.008984 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:01:38.319975 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="361.083758ms" I0213 18:01:38.323062 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="361.348265ms" I0213 18:01:38.325920 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="5.893346ms" I0213 18:01:38.326143 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="40.401µs" I0213 18:01:38.329403 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="6.29092ms" I0213 18:01:38.329482 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="49.672µs" I0213 18:01:38.335209 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="30.786µs" I0213 18:01:50.451374 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:01:50.458973 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:01:50.464457 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="64.657µs" I0213 18:01:50.467795 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="local-path-storage/local-path-provisioner-7dc846544d" duration="43.703µs" I0213 18:01:50.467853 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="42.423µs" I0213 18:01:50.478838 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="69.779µs" I0213 18:01:50.488900 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="43.258µs" I0213 18:01:50.496265 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="49.697µs" I0213 18:01:52.153313 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller" I0213 18:01:57.501356 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="10.208907ms" I0213 18:01:57.501418 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="31.879µs" I0213 18:01:57.511094 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="7.444586ms" I0213 18:01:57.511152 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="31.275µs" I0213 18:01:57.526508 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="7.588303ms" I0213 18:01:57.526591 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="54.624µs" I0213 18:02:14.125450 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:02:24.251178 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:02:30.653410 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="test-pvc-ns" I0213 18:02:34.276665 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="26.991137ms" I0213 18:02:34.282621 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="5.91871ms" I0213 18:02:34.282680 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="36.488µs" I0213 18:02:34.285860 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="50.132µs" I0213 18:02:34.441879 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="16.130827ms" I0213 18:02:34.450009 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="8.085211ms" I0213 18:02:34.450108 1 replica_set.go:679] "Finished 
syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="69.908µs" I0213 18:02:34.453119 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="56.903µs" I0213 18:02:34.539530 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:02:34.621722 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="17.680005ms" I0213 18:02:34.630579 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="8.815034ms" I0213 18:02:34.630649 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="42.922µs" I0213 18:02:34.634866 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="51.704µs" I0213 18:02:34.773980 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" I0213 18:02:34.783442 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:34.797259 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:34.797312 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:34.809992 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:37.211564 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="orders.acme.cert-manager.io" I0213 18:02:37.211593 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificates.cert-manager.io" I0213 18:02:37.211608 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificaterequests.cert-manager.io" I0213 18:02:37.211622 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="challenges.acme.cert-manager.io" I0213 18:02:37.211642 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="issuers.cert-manager.io" I0213 18:02:37.211692 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:02:37.218397 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:02:37.569102 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="6.028231ms" I0213 18:02:37.569169 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="41.549µs" I0213 18:02:38.312224 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:02:38.319412 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:02:39.577707 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="9.124428ms" I0213 18:02:39.577783 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="44.46µs" I0213 18:02:41.575976 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="68.955µs" I0213 18:02:42.578300 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:43.585693 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:44.706160 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:02:47.588222 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="8.502333ms" I0213 18:02:47.588308 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="55.844µs" I0213 18:02:50.404125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="24.998423ms" I0213 18:02:50.410909 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="6.751214ms" I0213 18:02:50.410980 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="45.872µs" I0213 18:02:50.414252 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="49.347µs" I0213 18:02:52.593454 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:53.601147 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:53.685219 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:54.600979 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="74.293µs" I0213 18:02:54.605653 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:54.610407 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0213 18:02:57.611022 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="55.995µs" I0213 18:03:05.348572 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:03:05.625229 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="8.83604ms" I0213 18:03:05.625313 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="cert-manager/trust-manager-865b9c84ff" duration="51.965µs" I0213 18:03:08.324700 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:03:09.425584 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:03:11.434526 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="19.754554ms" I0213 18:03:11.440922 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="6.358373ms" I0213 18:03:11.441001 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="43.238µs" I0213 18:03:11.444483 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="36.309µs" I0213 18:03:11.611828 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="20.551315ms" I0213 18:03:11.621015 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="9.14818ms" I0213 18:03:11.621100 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="56.125µs" I0213 18:03:15.649702 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="6.405281ms" I0213 18:03:15.649775 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="45.11µs" I0213 18:03:17.651563 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="4.910088ms" I0213 18:03:17.651621 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="28.556µs" I0213 18:03:25.548380 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:03:35.854207 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:03:37.201433 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="23.016915ms" I0213 18:03:37.209493 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="8.022226ms" I0213 18:03:37.209588 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="58.571µs" I0213 18:03:37.215386 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="70.585µs" I0213 18:03:37.311817 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="19.950552ms" I0213 18:03:37.318256 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="6.403644ms" I0213 18:03:37.318334 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="49.054µs" I0213 18:03:37.321711 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="47.304µs" I0213 18:03:37.608324 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="17.218751ms" I0213 18:03:37.627352 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="18.991151ms" I0213 18:03:37.627428 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="50.977µs" I0213 18:03:37.710023 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="20.74107ms" I0213 18:03:37.718297 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="8.238512ms" I0213 18:03:37.718373 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="50.473µs" I0213 18:03:37.722318 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="54.421µs" I0213 18:03:37.804174 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="16.278176ms" I0213 18:03:37.812009 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="7.793622ms" I0213 18:03:37.812086 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="52.705µs" I0213 18:03:37.815434 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="46.609µs" I0213 18:03:38.319720 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelineruns.tekton.dev" I0213 18:03:38.319752 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="verificationpolicies.tekton.dev" I0213 18:03:38.319768 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelines.tekton.dev" I0213 18:03:38.319788 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="tasks.tekton.dev" I0213 18:03:38.319808 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="taskruns.tekton.dev" 
I0213 18:03:38.319832 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="resolutionrequests.resolution.tekton.dev" I0213 18:03:38.319855 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="customruns.tekton.dev" I0213 18:03:38.319874 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="stepactions.tekton.dev" I0213 18:03:38.319953 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:03:39.431604 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:03:39.521108 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:03:39.532298 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:03:40.690856 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="58.961µs" I0213 18:03:42.695374 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="73.949µs" I0213 18:03:44.706700 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="5.53454ms" I0213 18:03:44.706798 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="61.568µs" I0213 18:03:46.708767 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="73.233µs" W0213 18:03:48.490754 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:03:48.496483 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:03:48.716890 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="5.417669ms" I0213 18:03:48.716957 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="43.723µs" W0213 18:03:49.381633 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:03:49.387054 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource 
cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:03:51.700511 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="6.076346ms" I0213 18:03:51.700657 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="112.836µs" I0213 18:03:53.703726 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="5.233811ms" I0213 18:03:53.703827 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="63.497µs" I0213 18:03:57.714417 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="5.064964ms" I0213 18:03:57.714528 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="67.354µs" W0213 18:04:03.503084 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:04:03.503653 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:04:04.389962 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:04:04.390551 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:04:04.487683 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="16.36213ms" I0213 18:04:04.495523 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="7.799272ms" I0213 18:04:04.495600 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="50.039µs" I0213 18:04:04.498680 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="41.175µs" I0213 
18:04:04.552858 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="22.097439ms" I0213 18:04:04.562008 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="9.121417ms" I0213 18:04:04.562104 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="69.182µs" I0213 18:04:04.565100 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="73.64µs" I0213 18:04:04.686760 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="15.700443ms" I0213 18:04:04.695414 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="8.61956ms" I0213 18:04:04.695488 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="52.793µs" I0213 18:04:04.698387 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="44.476µs" I0213 18:04:06.647409 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:04:07.762244 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="9.373645ms" I0213 18:04:07.762346 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="62.877µs" I0213 18:04:09.525322 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="eventlisteners.triggers.tekton.dev" I0213 18:04:09.525354 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggertemplates.triggers.tekton.dev" I0213 18:04:09.525370 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggerbindings.triggers.tekton.dev" I0213 18:04:09.525389 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggers.triggers.tekton.dev" I0213 18:04:09.525407 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="interceptors.triggers.tekton.dev" I0213 18:04:09.525475 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:04:09.539613 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:04:09.625853 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:04:09.640206 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:04:09.761428 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="5.490072ms" I0213 18:04:09.761510 1 
I0213 18:04:09.761510 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="54.317µs"
I0213 18:04:11.762398 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="57.162µs"
W0213 18:04:18.507889 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:04:18.508505 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0213 18:04:19.394786 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0213 18:04:19.395395 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0213 18:04:22.782923 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="9.297858ms"
I0213 18:04:22.783058 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="62.23µs"
I0213 18:04:24.505309 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="20.835712ms"
I0213 18:04:24.513204 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="7.854268ms"
I0213 18:04:24.513312 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="71.393µs"
I0213 18:04:24.519458 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="63.38µs"
I0213 18:04:27.799413 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="5.37378ms"
I0213 18:04:27.799502 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="64.447µs"
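
The W0213 type.go:183 / E0213 horizontal.go:275 pairs at 18:04:18 and 18:04:19 repeat roughly every 15 seconds (the HPA controller's default resync period) for the rest of this log. The horizontal-pod-autoscaler controller is trying to read pod CPU usage for the tekton-pipelines-webhook and tekton-operator-proxy-webhook Deployments, but nothing on this kind cluster serves the resource metrics API (metrics.k8s.io), so the streaming watch-list request falls back to LIST and the scale computation still fails with "could not find the requested resource". The HPAs generally stay at their current replica counts until a provider such as metrics-server registers metrics.k8s.io/v1beta1. Below is a minimal client-go sketch that probes for that API group; the kubeconfig location is an assumption for illustration.

package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: kubeconfig in the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// On this cluster the lookup fails, which matches the "(get pods.metrics.k8s.io)"
	// errors above; once metrics-server (or another provider) registers the
	// APIService, the HPA controller recovers on its next sync.
	if _, err := dc.ServerResourcesForGroupVersion("metrics.k8s.io/v1beta1"); err != nil {
		fmt.Println("metrics.k8s.io/v1beta1 is not served:", err)
		return
	}
	fmt.Println("metrics.k8s.io/v1beta1 is served; resource metrics are available")
}
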
duration="16.654915ms" I0213 18:04:30.047903 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="8.529551ms" I0213 18:04:30.047981 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="52.674µs" I0213 18:04:30.051428 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="65.154µs" I0213 18:04:30.103429 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="16.518506ms" I0213 18:04:30.123631 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="20.163973ms" I0213 18:04:30.123703 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="43.923µs" I0213 18:04:30.259624 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="16.703898ms" I0213 18:04:30.268566 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="8.902596ms" I0213 18:04:30.268655 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="57.608µs" I0213 18:04:30.275403 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="57.028µs" W0213 18:04:33.515975 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:04:33.516594 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:04:33.812380 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="67.645µs" W0213 18:04:34.399555 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:04:34.400119 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get 
pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:04:35.837441 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="5.378163ms" I0213 18:04:35.837522 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="52.27µs" I0213 18:04:37.651333 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:04:37.844424 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="6.073676ms" I0213 18:04:37.844507 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="55.504µs" W0213 18:04:48.521415 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:04:48.522018 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:04:49.404522 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:04:49.405064 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:00.360483 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="64.104µs" I0213 18:05:00.378427 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="9.177425ms" I0213 18:05:00.378560 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-89fb4cd9b" duration="96.405µs" W0213 18:05:03.529100 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:03.529703 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable 
to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:05:04.409874 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:04.410453 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:04.641243 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="19.732092ms" I0213 18:05:04.649475 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="8.189909ms" I0213 18:05:04.649558 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="51.796µs" I0213 18:05:04.659531 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="62.494µs" I0213 18:05:07.902173 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="96.549µs" I0213 18:05:07.914112 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="5.04957ms" I0213 18:05:07.914185 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="45.111µs" I0213 18:05:08.004854 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:05:09.632848 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="extensions.dashboard.tekton.dev" I0213 18:05:09.632899 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:05:09.650142 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:05:09.650190 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:05:09.733869 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:05:12.904337 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="12.832136ms" I0213 18:05:12.917141 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="12.77017ms" I0213 18:05:12.917244 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="69.354µs" I0213 18:05:12.924320 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="61.419µs" I0213 18:05:13.927374 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="9.03539ms" I0213 18:05:13.927471 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="63.92µs" I0213 18:05:13.952340 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="17.43436ms" I0213 18:05:13.961535 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="9.159246ms" I0213 18:05:13.961626 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="56.889µs" I0213 18:05:14.246121 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="112.029µs" E0213 18:05:14.372401 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"Role\", Name:\"tekton-dashboard-info\", UID:\"629d9a03-f345-43d4-8fdf-c2de6bdf0a77\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"tekton-pipelines\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-7xxzj\", UID:\"ce655df5-fb6a-481f-b8cd-1919546b1dad\", Controller:(*bool)(0xc001cf683b), BlockOwnerDeletion:(*bool)(0xc001cf683c)}}}: roles.rbac.authorization.k8s.io \"tekton-dashboard-info\" not found" logger="UnhandledError" I0213 18:05:14.555029 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="4.560902ms" I0213 18:05:14.555134 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="75.105µs" I0213 18:05:14.829846 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="4.90301ms" I0213 18:05:14.829956 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="74.078µs"
I0213 18:05:14.924148 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="101.71µs"
I0213 18:05:14.932356 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="71.646µs"
I0213 18:05:14.944286 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="60.288µs"
E0213 18:05:14.988850 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0213 18:05:15.510414 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="19.027694ms"
I0213 18:05:15.517863 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="7.411299ms"
I0213 18:05:15.517948 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="57.412µs"
I0213 18:05:15.521676 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="54.536µs"
I0213 18:05:15.931232 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="75.846µs"
I0213 18:05:15.938528 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="57.587µs"
I0213 18:05:15.951560 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="9.527µs"
W0213 18:05:16.113385 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0213 18:05:16.114018 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0213 18:05:16.114553 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0213 18:05:16.114579 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0213 18:05:16.939053 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="9.063453ms"
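
Two transient failure patterns show up around 18:05:14 to 18:05:16. The garbagecollector.go:360 entry above is the collector processing its graph node for the Role tekton-pipelines/tekton-dashboard-info (owned by the TektonInstallerSet dashboard-main-static-7xxzj); the Role has already been deleted by the time the item is synced, so the lookup returns NotFound and the item is retried, which is expected churn while the operator re-applies the dashboard manifests. The reflector.go and metadata.go lines come from the metadata-only informers that the garbage collector and quota monitor start for every resource they discover: extensions.dashboard.tekton.dev appears in the controller's discovery data (see the QuotaMonitor line at 18:05:09), but LIST and WATCH against dashboard.tekton.dev/v1alpha1 keep returning "could not find the requested resource", so the watch-list request falls back to LIST and the informer retries with backoff until the resource is actually served. A sketch of the same metadata-only LIST using client-go's metadata client follows; the kubeconfig path and the assumption that the Extension resource is namespaced in tekton-pipelines are illustrative, not taken from this cluster.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumption: kubeconfig in the default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	mc, err := metadata.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	gvr := schema.GroupVersionResource{
		Group:    "dashboard.tekton.dev",
		Version:  "v1alpha1",
		Resource: "extensions",
	}
	// While the resource is not served, this returns the same
	// "the server could not find the requested resource" error as the log.
	list, err := mc.Resource(gvr).Namespace("tekton-pipelines").
		List(context.Background(), metav1.ListOptions{})
	if err != nil {
		fmt.Println("metadata LIST failed:", err)
		return
	}
	for _, item := range list.Items {
		fmt.Println(item.Namespace, item.Name, item.UID)
	}
}
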
kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="78.028µs" I0213 18:05:16.962661 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="16.920227ms" I0213 18:05:16.971423 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="8.725679ms" I0213 18:05:16.971518 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="64.197µs" I0213 18:05:17.110006 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="45.616µs" I0213 18:05:17.129500 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="19.719578ms" I0213 18:05:17.136913 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="7.378785ms" I0213 18:05:17.136982 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="41.745µs" I0213 18:05:17.137021 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="22.558µs" I0213 18:05:17.143552 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="61.646µs" I0213 18:05:17.300003 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="15.491176ms" I0213 18:05:17.308243 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="8.205808ms" I0213 18:05:17.308466 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="94.301µs" I0213 18:05:17.316568 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="62.874µs" I0213 18:05:17.478862 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="17.121949ms" I0213 18:05:17.487952 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="9.049243ms" I0213 18:05:17.488028 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="44.002µs" I0213 18:05:17.495092 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="48.588µs" W0213 18:05:17.861978 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making 
progress is better than deadlocking, err = the server could not find the requested resource E0213 18:05:17.862660 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:05:17.863251 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:05:17.863273 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:05:17.936381 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="53.806µs" I0213 18:05:17.944479 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="52.915µs" W0213 18:05:18.534059 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:18.534703 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:05:19.414296 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:19.414896 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:20.052940 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="0s" I0213 18:05:20.057223 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:20.062510 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:20.062511 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:20.070527 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:20.942543 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="76.622µs" I0213 18:05:21.578294 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="11.31232ms" I0213 18:05:21.582981 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="4.654821ms" I0213 18:05:21.583033 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="29.937µs" I0213 18:05:21.589032 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="28.204µs" I0213 18:05:21.955737 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="8.548475ms" I0213 18:05:21.955812 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="49.095µs" W0213 18:05:22.941135 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:05:22.941711 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:05:22.942399 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:05:22.942427 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:05:23.507102 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="12.851134ms" I0213 18:05:23.513663 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="6.514059ms" I0213 18:05:23.513731 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="34.929µs" I0213 18:05:23.518374 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="84.88µs" I0213 18:05:23.952872 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="66.938µs" I0213 18:05:26.971985 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="8.638861ms" I0213 18:05:26.972051 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="45.42µs" W0213 18:05:31.666440 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could 
not find the requested resource E0213 18:05:31.667103 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:05:31.667675 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:05:31.667703 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:05:33.542780 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:33.543341 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:05:34.418840 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:34.419424 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:34.963347 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="6.322048ms" I0213 18:05:34.963424 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="47.856µs" I0213 18:05:39.182147 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:05:39.657728 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:05:39.737986 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="repositories.pipelinesascode.tekton.dev" I0213 18:05:39.738029 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:05:39.738050 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:05:39.758382 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:05:46.010094 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0213 18:05:46.469751 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH 
semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:05:46.470444 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:05:46.471006 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:05:46.471041 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:05:47.035373 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0213 18:05:48.547956 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:48.548593 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:05:49.423952 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:05:49.424597 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:05:50.034859 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="44.62µs" I0213 18:05:51.033930 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:52.040917 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:53.043899 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:53.060139 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="8.296835ms" I0213 18:05:53.060208 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="41.16µs" I0213 18:05:53.734005 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" 
duration="19.087126ms" I0213 18:05:53.750135 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="16.089911ms" I0213 18:05:53.750199 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="34.122µs" I0213 18:05:53.759859 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="40.547µs" I0213 18:05:54.050145 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:05:59.619375 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:06:01.041276 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="5.447701ms" I0213 18:06:01.041363 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-f96ccdb66" duration="48.718µs" I0213 18:06:01.076958 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="5.495318ms" I0213 18:06:01.077033 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="42.4µs" I0213 18:06:02.012929 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="19.926194ms" I0213 18:06:02.021388 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="8.416189ms" I0213 18:06:02.021476 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="55.389µs" I0213 18:06:02.025892 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="49.296µs" I0213 18:06:02.070775 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="14.360156ms" I0213 18:06:02.084573 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="13.757231ms" I0213 18:06:02.084663 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="55.211µs" I0213 18:06:02.094524 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="52.14µs" I0213 18:06:02.126139 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="6.526063ms" I0213 18:06:02.126209 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="44.172µs" I0213 18:06:02.194147 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="11.31616ms" I0213 
18:06:02.194220 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="44.35µs" W0213 18:06:03.556605 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:03.557293 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:04.429124 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:04.429737 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:06.203023 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="78.536µs" I0213 18:06:08.100529 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="6.160627ms" I0213 18:06:08.100609 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="49.138µs" I0213 18:06:08.103125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="80.052µs" I0213 18:06:09.099729 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:09.649237 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:06:09.743309 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicetokens.dex.coreos.com" I0213 18:06:09.743343 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedgeneratingpolicies.policies.kyverno.io" I0213 18:06:09.743361 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authcodes.dex.coreos.com" I0213 18:06:09.743386 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cleanuppolicies.kyverno.io" I0213 18:06:09.743406 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicerequests.dex.coreos.com" I0213 
18:06:09.743427 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.kyverno.io" I0213 18:06:09.743450 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="passwords.dex.coreos.com" I0213 18:06:09.743473 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespaceddeletingpolicies.policies.kyverno.io" I0213 18:06:09.743495 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="connectors.dex.coreos.com" I0213 18:06:09.743512 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="updaterequests.kyverno.io" I0213 18:06:09.743538 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policies.kyverno.io" I0213 18:06:09.743555 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.policies.kyverno.io" I0213 18:06:09.743571 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ephemeralreports.reports.kyverno.io" I0213 18:06:09.743591 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="signingkeies.dex.coreos.com" I0213 18:06:09.743609 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="refreshtokens.dex.coreos.com" I0213 18:06:09.743626 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedvalidatingpolicies.policies.kyverno.io" I0213 18:06:09.743652 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="offlinesessionses.dex.coreos.com" I0213 18:06:09.743676 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyreports.wgpolicyk8s.io" I0213 18:06:09.743694 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="oauth2clients.dex.coreos.com" I0213 18:06:09.743714 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedmutatingpolicies.policies.kyverno.io" I0213 18:06:09.743732 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authrequests.dex.coreos.com" I0213 18:06:09.743745 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedimagevalidatingpolicies.policies.kyverno.io" I0213 18:06:09.743999 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:06:09.767014 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:06:10.104313 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:10.105388 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 
18:06:10.261999 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:10.944161 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:06:11.067590 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:06:11.109324 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:11.115030 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0213 18:06:11.115106 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="81.359µs" W0213 18:06:13.628944 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:06:13.629599 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:06:13.630146 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:06:13.630170 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:06:18.561567 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:18.562258 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:19.434067 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:19.434749 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:20.334713 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="63.69µs" I0213 18:06:20.359718 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="kyverno/kyverno-admission-controller-877795fc7" duration="9.360552ms" I0213 18:06:20.359792 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="48.695µs" W0213 18:06:33.571646 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:33.572280 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:34.439359 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:34.439955 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:40.029157 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:06:40.948724 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshots.appstudio.redhat.com" I0213 18:06:40.948752 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentdetectionqueries.appstudio.redhat.com" I0213 18:06:40.948765 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargets.appstudio.redhat.com" I0213 18:06:40.948779 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="enterprisecontractpolicies.appstudio.redhat.com" I0213 18:06:40.948795 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="environments.appstudio.redhat.com" I0213 18:06:40.948814 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="applications.appstudio.redhat.com" I0213 18:06:40.948835 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshotenvironmentbindings.appstudio.redhat.com" I0213 18:06:40.948848 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="components.appstudio.redhat.com" I0213 18:06:40.948858 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="promotionruns.appstudio.redhat.com" I0213 18:06:40.948868 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargetclaims.appstudio.redhat.com" I0213 18:06:40.948974 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:06:41.075667 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:06:42.049337 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:06:42.175789 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:06:46.577449 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="21.255923ms" I0213 18:06:46.585454 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="7.971005ms" I0213 18:06:46.585521 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="35.163µs" I0213 18:06:46.592132 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="65.842µs" W0213 18:06:48.576740 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:48.577407 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:06:49.444003 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:06:49.444615 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:06:50.206673 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="47.832µs" I0213 18:06:57.981416 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-69dd4cd86b" duration="19.548365ms" I0213 18:06:57.989655 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-69dd4cd86b" duration="8.198886ms" I0213 18:06:57.989719 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="build-service/build-service-controller-manager-69dd4cd86b" duration="30.182µs" I0213 18:06:57.996043 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-69dd4cd86b" duration="35.887µs" I0213 18:07:00.315813 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:07:01.216512 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="5.844357ms" I0213 18:07:01.216581 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-7fd8bb956d" duration="42.316µs" I0213 18:07:01.237624 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-69dd4cd86b" duration="39.652µs" W0213 18:07:03.586484 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:03.587044 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:04.449331 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:04.449929 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:06.415205 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="19.438039ms" I0213 18:07:06.423698 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="8.418673ms" I0213 18:07:06.423766 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="40.418µs" I0213 18:07:06.423789 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="12.327µs" I0213 18:07:06.433383 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="33.17µs" W0213 18:07:06.551206 1 reflector.go:362] The 
watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:07:06.551823 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:07:06.552437 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:07:06.552464 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:07:08.977065 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="22.159955ms" I0213 18:07:08.985784 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="8.649819ms" I0213 18:07:08.985869 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="34.208µs" I0213 18:07:08.997019 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="37.444µs" I0213 18:07:10.531166 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:07:12.053979 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalservicesconfigs.appstudio.redhat.com" I0213 18:07:12.054018 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalrequests.appstudio.redhat.com" I0213 18:07:12.054051 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentgroups.appstudio.redhat.com" I0213 18:07:12.054105 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releases.appstudio.redhat.com" I0213 18:07:12.054119 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseserviceconfigs.appstudio.redhat.com" I0213 18:07:12.054138 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplanadmissions.appstudio.redhat.com" I0213 18:07:12.054154 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="integrationtestscenarios.appstudio.redhat.com" I0213 18:07:12.054174 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplans.appstudio.redhat.com" I0213 18:07:12.054265 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:07:12.184465 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:07:12.246590 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-69dd4cd86b" duration="4.846855ms" I0213 18:07:12.246649 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-69dd4cd86b" duration="33.284µs" I0213 18:07:12.267998 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="53.829µs" I0213 18:07:12.427648 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="15.074925ms" I0213 18:07:12.433994 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="6.306486ms" I0213 18:07:12.434062 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="38.282µs" I0213 18:07:12.443044 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="55.778µs" I0213 18:07:13.085367 1 shared_informer.go:320] Caches are synced for garbage collector I0213 18:07:13.155171 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:07:14.278835 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="47.441µs" I0213 18:07:14.300130 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="11.156817ms" I0213 18:07:14.300192 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="33.447µs" W0213 18:07:18.829202 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:18.829801 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:19.454050 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:19.454653 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:20.296248 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="46.493µs" 
I0213 18:07:21.299218 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="62.27µs" I0213 18:07:23.285559 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="11.506997ms" I0213 18:07:23.285624 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="38.426µs" I0213 18:07:27.315596 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="62.108µs" I0213 18:07:29.323514 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="56.052µs" W0213 18:07:33.986279 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:33.986904 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:34.460259 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:34.460825 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:07:36.343939 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="61.719µs" I0213 18:07:41.342953 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:07:48.993262 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:07:48.996281 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:49.464735 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not 
find the requested resource (get pods.metrics.k8s.io) E0213 18:07:49.465403 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:07:50.302300 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:07:50.302881 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:07:50.303428 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:07:50.303451 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:07:51.433772 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:07:54.035731 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" I0213 18:08:01.455920 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:08:04.005457 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:04.006022 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:04.469896 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:04.470475 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:07.359389 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" 
duration="8.059494ms" I0213 18:08:07.359455 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="41.48µs" I0213 18:08:11.583343 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:08:19.010762 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:19.011450 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:19.475146 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:19.475710 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:21.690462 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:08:34.019726 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:34.020291 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:34.480176 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:34.480728 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:37.368705 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="8.229189ms" I0213 18:08:37.368781 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-7678db8748" duration="44.675µs" W0213 18:08:37.756574 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:08:37.757215 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:08:37.757716 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:08:37.757746 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:08:45.592458 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="22.302865ms" I0213 18:08:45.601422 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="8.931175ms" I0213 18:08:45.601494 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="39.455µs" I0213 18:08:45.610725 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="33.085µs" W0213 18:08:49.024520 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:49.025146 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:08:49.484900 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:08:49.485575 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:08:52.238496 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:08:55.539855 1 replica_set.go:679] 
"Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="41.505µs" I0213 18:09:02.387517 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:09:04.034433 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:04.035003 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:04.490029 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:04.490606 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:09:06.550185 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="5.411899ms" I0213 18:09:06.550253 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7f9c769f45" duration="36.588µs" I0213 18:09:07.819314 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-597f7758b6" duration="19.383497ms" I0213 18:09:07.826920 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-597f7758b6" duration="7.561387ms" I0213 18:09:07.827004 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-597f7758b6" duration="48.902µs" I0213 18:09:07.833481 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-597f7758b6" duration="32.139µs" I0213 18:09:10.590005 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-597f7758b6" duration="6.757065ms" I0213 18:09:10.590095 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-597f7758b6" duration="59.404µs" I0213 18:09:10.613263 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="16.388744ms" I0213 18:09:10.623206 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" 
duration="9.899961ms" I0213 18:09:10.623281 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="40.772µs" I0213 18:09:11.014569 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="51.564µs" I0213 18:09:11.589228 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="49.014µs" I0213 18:09:11.593277 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-56c9f9fdd8" duration="44.919µs" W0213 18:09:12.477673 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:09:12.478334 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:09:12.478898 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:09:12.478921 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:09:12.729109 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:09:13.107757 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0213 18:09:13.173066 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="imagerepositories.appstudio.redhat.com" I0213 18:09:13.173142 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0213 18:09:13.173175 1 shared_informer.go:320] Caches are synced for resource quota I0213 18:09:13.208377 1 shared_informer.go:320] Caches are synced for garbage collector W0213 18:09:19.039161 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:19.039793 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:19.494386 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:19.494931 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error 
is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:34.047556 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:34.048127 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:34.498988 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:34.499621 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:49.052759 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:49.053719 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:49.505776 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:09:49.506367 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:09:50.899197 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:09:50.899831 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" 
err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:09:50.900395 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:09:50.900417 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:10:04.061595 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:04.062153 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:04.510485 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:04.511100 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:19.066676 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:19.067311 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:19.515585 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:19.516200 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:34.076140 1 type.go:183] The watchlist 
request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:34.076747 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:34.520058 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:34.520680 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:48.354030 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:10:48.354696 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:10:48.355269 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:10:48.355291 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:10:49.080886 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:49.081503 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:10:49.524714 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:10:49.525326 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first 
error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:04.089139 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:04.089685 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:04.529184 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:04.529695 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:19.094778 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:19.095485 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:19.533706 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:19.534328 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:11:25.399499 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:11:34.103733 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:34.104357 1 
horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:34.538497 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:34.539047 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:11:35.789950 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:11:42.343652 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:11:42.344335 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:11:42.344868 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:11:42.344894 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:11:45.927289 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:11:49.109544 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:49.110222 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:11:49.542736 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:11:49.543314 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:11:56.281021 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:04.118717 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:04.119315 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:04.547881 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:04.548403 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:12:06.432919 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:12:16.690398 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:19.124151 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:19.124786 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:19.552984 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:19.553657 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from 
resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:21.422282 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:12:21.422892 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:12:21.423439 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:12:21.423460 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:12:27.047247 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:34.133211 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:34.133828 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:34.559145 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:34.559695 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:12:37.071920 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:12:49.138379 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:49.139012 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:49.563856 1 
type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:12:49.564406 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:12:58.034920 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:12:58.035527 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:12:58.036013 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:12:58.036033 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:13:04.147724 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:04.148407 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:04.568575 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:04.569140 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:13:07.857669 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:13:18.019486 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:13:19.153130 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find 
the requested resource (get pods.metrics.k8s.io) E0213 18:13:19.153709 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:19.573639 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:19.574222 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:13:28.077165 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:13:28.802562 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:13:28.803262 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:13:28.803839 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:13:28.803861 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:13:34.163306 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:34.163868 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:34.578178 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:34.578746 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out 
of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:13:48.392564 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:13:49.168658 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:49.169300 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:13:49.582484 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:13:49.583125 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:13:58.645916 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:04.176926 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:04.177492 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:04.587159 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:04.587747 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:14.981157 1 reflector.go:362] The watchlist request ended with an error, falling back 
to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:14:14.981775 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:14:14.982295 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:14:14.982323 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:14:19.101466 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:19.182260 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:19.182862 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:19.592317 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:19.592900 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:14:29.129812 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:34.191382 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:34.191959 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:14:34.596798 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:34.597404 1 
horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:14:39.327258 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:49.196236 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:49.196835 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:14:49.414263 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:14:49.601520 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:14:49.602143 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:04.204708 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:04.205301 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:04.605889 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:04.606499 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource 
metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:12.437864 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:15:12.438487 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:15:12.439056 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:15:12.439098 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:15:19.209696 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:19.210354 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:19.610697 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:19.611250 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:15:30.114666 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:15:34.218091 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:34.218709 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:34.615563 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find 
the requested resource (get pods.metrics.k8s.io) E0213 18:15:34.616106 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:15:40.389004 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:15:49.223356 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:49.223956 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:15:49.620504 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:15:49.621102 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:15:50.457696 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:15:59.385026 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:15:59.385707 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:15:59.386308 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:15:59.386330 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:16:00.849767 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:16:04.232090 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested 
resource (get pods.metrics.k8s.io) E0213 18:16:04.232698 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:04.625508 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:04.626119 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:19.237429 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:19.237995 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:19.629502 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:19.630052 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:30.743509 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:16:30.744145 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:16:30.744720 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:16:30.744747 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch 
*v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:16:31.531260 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:16:34.246307 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:34.246875 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:34.634390 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:34.634961 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:16:41.915094 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:16:49.251821 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:49.252426 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:16:49.639262 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:16:49.639840 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:04.260623 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:04.261194 1 
horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:04.643599 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:04.644178 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:15.524937 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:17:15.525575 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:17:15.526133 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:17:15.526156 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:17:19.265908 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:19.266542 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:19.648764 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:19.649389 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could 
not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:17:32.544012 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:17:34.274296 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:34.274875 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:34.654883 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:34.655456 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:17:42.818262 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:17:49.279352 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:49.279990 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:17:49.659850 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:17:49.660527 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:17:52.859893 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:17:54.545299 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better 
than deadlocking, err = the server could not find the requested resource E0213 18:17:54.545904 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:17:54.546453 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:17:54.546477 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:18:03.085272 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:18:04.288388 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:04.288919 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:04.664756 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:04.665337 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:19.292869 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:19.293552 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:19.669296 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:19.669907 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 
1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:18:23.228574 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:18:33.276535 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:18:34.302090 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:34.302674 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:34.673530 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:34.674055 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:48.079538 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:18:48.080191 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:18:48.080728 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:18:48.080756 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:18:49.308287 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:49.308873 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch 
metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:18:49.678355 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:18:49.678909 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:19:04.317214 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:04.317806 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:19:04.683017 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:04.683641 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:19:19.322881 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:19.323487 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:19:19.687966 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:19.688540 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: 
failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:19:27.385480 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:19:27.386139 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:19:27.386676 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:19:27.386700 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:19:34.332175 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:34.332756 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:19:34.462376 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:19:34.695486 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:34.696045 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:19:44.496358 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:19:49.337200 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:49.337771 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find 
the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:19:49.699555 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:19:49.700149 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:20:04.345441 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:04.346009 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:20:04.704474 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:04.705050 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:20:08.113613 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:20:08.114271 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:20:08.114793 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:20:08.114817 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:20:15.064983 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:20:19.350786 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get 
pods.metrics.k8s.io) E0213 18:20:19.351423 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:20:19.709522 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:19.710125 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:20:25.254378 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:20:34.359833 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:34.360402 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:20:34.715440 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:34.716000 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:20:35.657431 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:20:39.327660 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:20:39.328311 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:20:39.328921 1 reflector.go:569] 
k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:20:39.328944 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:20:49.364481 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:49.365135 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:20:49.720165 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:20:49.720783 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:04.373702 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:04.374298 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:04.724530 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:04.725044 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:21:06.307611 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:21:13.830806 1 reflector.go:362] The watchlist request ended with an 
error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:21:13.831476 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:21:13.832048 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:21:13.832084 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:21:16.443574 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:21:19.379290 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:19.379914 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:19.728605 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:19.729183 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:34.388419 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:34.388938 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:34.740054 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:34.740707 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:49.393484 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:49.394141 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:21:49.751424 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:21:49.752004 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:22:00.875172 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:22:00.875825 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:22:00.876466 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:22:00.876493 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:22:04.402099 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:04.402670 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:22:04.755842 1 
type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:04.756445 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:22:19.407346 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:19.407951 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:22:19.760303 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:19.760841 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:22:28.010145 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:22:34.415875 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:34.416433 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:22:34.765026 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:34.765608 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: 
unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:22:38.314101 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:22:48.706634 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:22:48.707305 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:22:48.707872 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:22:48.707894 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:22:49.420933 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:49.421549 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:22:49.769177 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:22:49.769751 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:22:59.001249 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:23:04.429307 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:04.429881 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get 
pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:23:04.773457 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:04.774046 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:23:09.342040 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:23:19.434192 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:19.434817 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:23:19.778646 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:19.779234 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:23:21.640987 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:23:21.641625 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:23:21.642196 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:23:21.642216 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:23:34.443373 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 
18:23:34.443970 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:23:34.783093 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:34.783625 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:23:49.448819 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:49.449469 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:23:49.788031 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:23:49.788561 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:02.896814 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:24:02.897458 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:24:02.897982 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:24:02.898003 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list 
*v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:24:04.457409 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:04.458002 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:04.792849 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:04.793485 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:19.462618 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:19.463276 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:19.797544 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:19.798124 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:34.471587 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:34.472188 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to 
get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:34.802586 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:34.803198 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:46.285898 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:24:46.286515 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:24:46.287048 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:24:46.287089 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:24:49.476579 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:49.477183 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:24:49.810263 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:24:49.810847 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:24:51.901619 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0213 18:25:02.019752 1 range_allocator.go:247] "Successfully synced" 
logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:25:04.485233 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:04.485811 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:25:04.815137 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:04.815720 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:25:12.336455 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:25:19.490527 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:19.491174 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:25:19.824440 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:19.825017 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:25:22.547403 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:25:34.499695 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:34.500331 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of 
replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:25:34.828849 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:34.829440 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:25:42.362906 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:25:42.363523 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:25:42.364042 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:25:42.364061 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0213 18:25:42.920028 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:25:49.505506 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:49.506111 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:25:49.832751 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:25:49.833292 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch 
metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0213 18:26:02.948212 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0213 18:26:04.514536 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:04.515140 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:04.837335 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:04.837905 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:19.520036 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:19.520679 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:19.843242 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:19.843856 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:34.529208 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:34.529745 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:34.848857 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:34.849430 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:41.453815 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0213 18:26:41.454445 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0213 18:26:41.454997 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0213 18:26:41.455020 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0213 18:26:49.534401 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:49.535094 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0213 18:26:49.854202 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0213 18:26:49.854801 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
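Everything the controller manager repeats above reduces to two API groups the cluster's apiserver is not serving: metrics.k8s.io (so the horizontal-pod-autoscaler controller cannot fetch CPU utilization for the tekton-pipelines-webhook and tekton-operator-proxy-webhook Deployments, which typically means metrics-server is not installed in the kind cluster) and dashboard.tekton.dev/v1alpha1, Resource=extensions (so the garbage collector's metadata informer keeps failing its list/watch, which typically means the Tekton Dashboard CRD is absent while something still references that resource). A minimal client-go sketch, under the assumption that a kubeconfig for this cluster sits at the default ~/.kube/config path, that checks whether the apiserver advertises those two groups:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"

        "k8s.io/client-go/discovery"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Assumption: kubeconfig at the default location; adjust for your setup.
        home, err := os.UserHomeDir()
        if err != nil {
            panic(err)
        }
        config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(home, ".kube", "config"))
        if err != nil {
            panic(err)
        }

        dc, err := discovery.NewDiscoveryClientForConfig(config)
        if err != nil {
            panic(err)
        }

        // Ask the apiserver which API groups it serves and look for the two
        // groups the log entries above fail on.
        groups, err := dc.ServerGroups()
        if err != nil {
            panic(err)
        }
        served := map[string]bool{}
        for _, g := range groups.Groups {
            served[g.Name] = true
        }
        for _, want := range []string{"metrics.k8s.io", "dashboard.tekton.dev"} {
            fmt.Printf("%-22s served: %v\n", want, served[want])
        }
    }

The same check can be done from the command line with kubectl get apiservice v1beta1.metrics.k8s.io and kubectl api-resources --api-group=dashboard.tekton.dev. If the metrics APIService is missing, installing metrics-server (on kind, usually with --kubelet-insecure-tls added to the metrics-server container args) should stop the recurring horizontal.go errors; the dashboard.tekton.dev failures should stop once the corresponding Tekton Dashboard CRD exists or the stale references to it are removed.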