I0214 17:48:06.886107 1 serving.go:386] Generated self-signed cert in-memory I0214 17:48:07.064711 1 controllermanager.go:185] "Starting" version="v1.32.5" I0214 17:48:07.064726 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" I0214 17:48:07.065590 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/pki/front-proxy-ca.crt" I0214 17:48:07.065599 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/pki/ca.crt" I0214 17:48:07.065662 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257 I0214 17:48:07.065675 1 tlsconfig.go:243] "Starting DynamicServingCertificateController" I0214 17:48:07.065819 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager... E0214 17:48:08.740619 1 leaderelection.go:436] error retrieving resource lock kube-system/kube-controller-manager: leases.coordination.k8s.io "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-system" I0214 17:48:13.018084 1 leaderelection.go:271] successfully acquired lease kube-system/kube-controller-manager I0214 17:48:13.018144 1 event.go:389] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="kind-mapt-control-plane_21888660-0cd8-420b-bf8a-f47f821f4476 became leader" I0214 17:48:13.020036 1 controllermanager.go:765] "Started controller" controller="serviceaccount-token-controller" I0214 17:48:13.020052 1 shared_informer.go:313] Waiting for caches to sync for tokens I0214 17:48:13.031688 1 controllermanager.go:765] "Started controller" controller="endpointslice-controller" I0214 17:48:13.031812 1 endpointslice_controller.go:281] "Starting endpoint slice controller" logger="endpointslice-controller" I0214 17:48:13.031828 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice I0214 17:48:13.038189 1 controllermanager.go:765] "Started controller" controller="replicationcontroller-controller" I0214 17:48:13.038309 1 replica_set.go:217] "Starting controller" logger="replicationcontroller-controller" name="replicationcontroller" I0214 17:48:13.038322 1 shared_informer.go:313] Waiting for caches to sync for ReplicationController I0214 17:48:13.044380 1 controllermanager.go:765] "Started controller" controller="job-controller" I0214 17:48:13.044389 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="node-route-controller" I0214 17:48:13.044466 1 job_controller.go:243] "Starting job controller" logger="job-controller" I0214 17:48:13.044471 1 shared_informer.go:313] Waiting for caches to sync for job I0214 17:48:13.052454 1 controllermanager.go:765] "Started controller" controller="persistentvolumeclaim-protection-controller" I0214 17:48:13.052543 1 pvc_protection_controller.go:168] "Starting PVC protection controller" logger="persistentvolumeclaim-protection-controller" I0214 17:48:13.052549 1 shared_informer.go:313] Waiting for caches to sync for PVC protection I0214 17:48:13.062949 1 controllermanager.go:765] "Started controller" controller="ttl-after-finished-controller" I0214 17:48:13.063023 1 ttlafterfinished_controller.go:112] "Starting TTL after finished controller" logger="ttl-after-finished-controller" I0214 17:48:13.063030 1 shared_informer.go:313] Waiting for caches to sync for TTL after finished I0214 
17:48:13.073384 1 controllermanager.go:765] "Started controller" controller="legacy-serviceaccount-token-cleaner-controller" I0214 17:48:13.073479 1 legacy_serviceaccount_token_cleaner.go:103] "Starting legacy service account token cleaner controller" logger="legacy-serviceaccount-token-cleaner-controller" I0214 17:48:13.073490 1 shared_informer.go:313] Waiting for caches to sync for legacy-service-account-token-cleaner I0214 17:48:13.092128 1 controllermanager.go:765] "Started controller" controller="validatingadmissionpolicy-status-controller" I0214 17:48:13.092138 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="service-cidr-controller" requiredFeatureGates=["MultiCIDRServiceAllocator"] I0214 17:48:13.092214 1 shared_informer.go:313] Waiting for caches to sync for validatingadmissionpolicy-status I0214 17:48:13.103055 1 controllermanager.go:765] "Started controller" controller="endpointslice-mirroring-controller" I0214 17:48:13.103171 1 endpointslicemirroring_controller.go:227] "Starting EndpointSliceMirroring controller" logger="endpointslice-mirroring-controller" I0214 17:48:13.103180 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice_mirroring I0214 17:48:13.113191 1 controllermanager.go:765] "Started controller" controller="replicaset-controller" I0214 17:48:13.113306 1 replica_set.go:217] "Starting controller" logger="replicaset-controller" name="replicaset" I0214 17:48:13.113316 1 shared_informer.go:313] Waiting for caches to sync for ReplicaSet I0214 17:48:13.120356 1 shared_informer.go:320] Caches are synced for tokens I0214 17:48:13.225806 1 range_allocator.go:112] "No Secondary Service CIDR provided. Skipping filtering out secondary service addresses" logger="node-ipam-controller" I0214 17:48:13.225826 1 controllermanager.go:765] "Started controller" controller="node-ipam-controller" I0214 17:48:13.225888 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller" I0214 17:48:13.225894 1 shared_informer.go:313] Waiting for caches to sync for node I0214 17:48:13.270044 1 node_lifecycle_controller.go:432] "Controller will reconcile labels" logger="node-lifecycle-controller" I0214 17:48:13.270074 1 controllermanager.go:765] "Started controller" controller="node-lifecycle-controller" I0214 17:48:13.270080 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"] I0214 17:48:13.270096 1 controllermanager.go:743] "Warning: skipping controller" controller="storage-version-migrator-controller" I0214 17:48:13.270133 1 node_lifecycle_controller.go:466] "Sending events to api server" logger="node-lifecycle-controller" I0214 17:48:13.270144 1 node_lifecycle_controller.go:477] "Starting node controller" logger="node-lifecycle-controller" I0214 17:48:13.270149 1 shared_informer.go:313] Waiting for caches to sync for taint I0214 17:48:13.573171 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch" I0214 17:48:13.573203 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io" I0214 17:48:13.573215 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io" I0214 17:48:13.573228 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates" W0214 17:48:13.573236 1 shared_informer.go:597] resyncPeriod 17h57m11.324665304s is smaller than resyncCheckPeriod 22h48m31.587129353s and the informer has already started. Changing it to 22h48m31.587129353s I0214 17:48:13.573274 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="daemonsets.apps" I0214 17:48:13.573281 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="statefulsets.apps" I0214 17:48:13.573289 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch" I0214 17:48:13.573297 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy" W0214 17:48:13.573310 1 shared_informer.go:597] resyncPeriod 17h35m36.651200999s is smaller than resyncCheckPeriod 22h48m31.587129353s and the informer has already started. Changing it to 22h48m31.587129353s I0214 17:48:13.573324 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts" I0214 17:48:13.573330 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps" I0214 17:48:13.573345 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io" I0214 17:48:13.573350 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io" I0214 17:48:13.573359 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges" I0214 17:48:13.573365 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps" I0214 17:48:13.573396 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling" I0214 17:48:13.573410 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io" I0214 17:48:13.573421 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io" I0214 17:48:13.573435 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps" I0214 17:48:13.573443 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io" I0214 17:48:13.573453 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints" I0214 17:48:13.573466 1 controllermanager.go:765] "Started controller" controller="resourcequota-controller" I0214 17:48:13.573504 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller" I0214 17:48:13.573517 1 shared_informer.go:313] Waiting for 
caches to sync for resource quota I0214 17:48:13.573531 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller" I0214 17:48:13.870152 1 controllermanager.go:765] "Started controller" controller="horizontal-pod-autoscaler-controller" I0214 17:48:13.870179 1 horizontal.go:201] "Starting HPA controller" logger="horizontal-pod-autoscaler-controller" I0214 17:48:13.870185 1 shared_informer.go:313] Waiting for caches to sync for HPA I0214 17:48:14.023263 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-cleaner-controller" I0214 17:48:14.023291 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller" I0214 17:48:14.174309 1 controllermanager.go:765] "Started controller" controller="persistentvolume-protection-controller" I0214 17:48:14.174323 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="volumeattributesclass-protection-controller" requiredFeatureGates=["VolumeAttributesClass"] I0214 17:48:14.174332 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"] I0214 17:48:14.174335 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="resourceclaim-controller" requiredFeatureGates=["DynamicResourceAllocation"] I0214 17:48:14.174335 1 pv_protection_controller.go:81] "Starting PV protection controller" logger="persistentvolume-protection-controller" I0214 17:48:14.174362 1 shared_informer.go:313] Waiting for caches to sync for PV protection I0214 17:48:14.174338 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="selinux-warning-controller" requiredFeatureGates=["SELinuxChangePolicy"] I0214 17:48:14.325637 1 controllermanager.go:765] "Started controller" controller="persistentvolume-expander-controller" I0214 17:48:14.325673 1 expand_controller.go:329] "Starting expand controller" logger="persistentvolume-expander-controller" I0214 17:48:14.325680 1 shared_informer.go:313] Waiting for caches to sync for expand I0214 17:48:14.473533 1 controllermanager.go:765] "Started controller" controller="ephemeral-volume-controller" I0214 17:48:14.473578 1 controller.go:173] "Starting ephemeral volume controller" logger="ephemeral-volume-controller" I0214 17:48:14.473587 1 shared_informer.go:313] Waiting for caches to sync for ephemeral I0214 17:48:14.623506 1 controllermanager.go:765] "Started controller" controller="endpoints-controller" I0214 17:48:14.623573 1 endpoints_controller.go:182] "Starting endpoint controller" logger="endpoints-controller" I0214 17:48:14.623581 1 shared_informer.go:313] Waiting for caches to sync for endpoint I0214 17:48:14.869884 1 controllermanager.go:765] "Started controller" controller="garbage-collector-controller" I0214 17:48:14.870486 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector" I0214 17:48:14.870543 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:48:14.870599 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder" I0214 17:48:15.129093 1 controllermanager.go:765] "Started controller" controller="daemonset-controller" I0214 17:48:15.129145 1 daemon_controller.go:294] "Starting daemon sets controller" logger="daemonset-controller" I0214 17:48:15.129152 1 shared_informer.go:313] Waiting for 
caches to sync for daemon sets I0214 17:48:15.320029 1 controllermanager.go:765] "Started controller" controller="disruption-controller" I0214 17:48:15.320070 1 disruption.go:452] "Sending events to api server." logger="disruption-controller" I0214 17:48:15.320101 1 disruption.go:463] "Starting disruption controller" logger="disruption-controller" I0214 17:48:15.320109 1 shared_informer.go:313] Waiting for caches to sync for disruption I0214 17:48:15.370545 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-approving-controller" I0214 17:48:15.370573 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-approving-controller" name="csrapproving" I0214 17:48:15.370589 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrapproving I0214 17:48:15.523694 1 controllermanager.go:765] "Started controller" controller="token-cleaner-controller" I0214 17:48:15.523753 1 tokencleaner.go:117] "Starting token cleaner controller" logger="token-cleaner-controller" I0214 17:48:15.523760 1 shared_informer.go:313] Waiting for caches to sync for token_cleaner I0214 17:48:15.523765 1 shared_informer.go:320] Caches are synced for token_cleaner I0214 17:48:15.673607 1 controllermanager.go:765] "Started controller" controller="persistentvolume-binder-controller" I0214 17:48:15.673677 1 pv_controller_base.go:308] "Starting persistent volume controller" logger="persistentvolume-binder-controller" I0214 17:48:15.673686 1 shared_informer.go:313] Waiting for caches to sync for persistent volume I0214 17:48:15.922348 1 controllermanager.go:765] "Started controller" controller="namespace-controller" I0214 17:48:15.922382 1 namespace_controller.go:202] "Starting namespace controller" logger="namespace-controller" I0214 17:48:15.922389 1 shared_informer.go:313] Waiting for caches to sync for namespace I0214 17:48:16.073940 1 controllermanager.go:765] "Started controller" controller="serviceaccount-controller" I0214 17:48:16.073977 1 serviceaccounts_controller.go:114] "Starting service account controller" logger="serviceaccount-controller" I0214 17:48:16.073983 1 shared_informer.go:313] Waiting for caches to sync for service account I0214 17:48:16.223873 1 controllermanager.go:765] "Started controller" controller="statefulset-controller" I0214 17:48:16.223943 1 stateful_set.go:166] "Starting stateful set controller" logger="statefulset-controller" I0214 17:48:16.223952 1 shared_informer.go:313] Waiting for caches to sync for stateful set I0214 17:48:16.372966 1 controllermanager.go:765] "Started controller" controller="bootstrap-signer-controller" I0214 17:48:16.373010 1 shared_informer.go:313] Waiting for caches to sync for bootstrap_signer I0214 17:48:16.524208 1 controllermanager.go:765] "Started controller" controller="clusterrole-aggregation-controller" I0214 17:48:16.524325 1 clusterroleaggregation_controller.go:194] "Starting ClusterRoleAggregator controller" logger="clusterrole-aggregation-controller" I0214 17:48:16.524331 1 shared_informer.go:313] Waiting for caches to sync for ClusterRoleAggregator I0214 17:48:16.674604 1 controllermanager.go:765] "Started controller" controller="root-ca-certificate-publisher-controller" I0214 17:48:16.674635 1 publisher.go:107] "Starting root CA cert publisher controller" logger="root-ca-certificate-publisher-controller" I0214 17:48:16.674641 1 shared_informer.go:313] Waiting for caches to sync for crt configmap I0214 17:48:16.823244 1 controllermanager.go:765] "Started 
controller" controller="deployment-controller" I0214 17:48:16.823335 1 deployment_controller.go:173] "Starting controller" logger="deployment-controller" controller="deployment" I0214 17:48:16.823343 1 shared_informer.go:313] Waiting for caches to sync for deployment I0214 17:48:16.973927 1 controllermanager.go:765] "Started controller" controller="cronjob-controller" I0214 17:48:16.973988 1 cronjob_controllerv2.go:145] "Starting cronjob controller v2" logger="cronjob-controller" I0214 17:48:16.973995 1 shared_informer.go:313] Waiting for caches to sync for cronjob I0214 17:48:17.020961 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving" I0214 17:48:17.020976 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-serving I0214 17:48:17.020994 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0214 17:48:17.021138 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client" I0214 17:48:17.021157 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-client I0214 17:48:17.021196 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0214 17:48:17.021242 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client" I0214 17:48:17.021277 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kube-apiserver-client I0214 17:48:17.021291 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0214 17:48:17.021396 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-signing-controller" I0214 17:48:17.021454 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown" I0214 17:48:17.021465 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-legacy-unknown I0214 17:48:17.021475 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key" I0214 17:48:17.174609 1 controllermanager.go:765] "Started controller" controller="persistentvolume-attach-detach-controller" I0214 17:48:17.174656 1 attach_detach_controller.go:338] "Starting attach detach controller" logger="persistentvolume-attach-detach-controller" I0214 17:48:17.174663 1 shared_informer.go:313] Waiting for caches to sync for attach detach I0214 17:48:17.219596 1 controllermanager.go:765] "Started controller" controller="taint-eviction-controller" I0214 17:48:17.219622 1 taint_eviction.go:281] "Starting" logger="taint-eviction-controller" controller="taint-eviction-controller" I0214 17:48:17.219629 1 taint_eviction.go:287] "Sending events to api server" logger="taint-eviction-controller" I0214 17:48:17.219652 1 shared_informer.go:313] Waiting for caches to sync for taint-eviction-controller I0214 17:48:17.373530 1 controllermanager.go:765] "Started controller" controller="pod-garbage-collector-controller" I0214 17:48:17.373561 1 gc_controller.go:99] "Starting GC controller" 
logger="pod-garbage-collector-controller" I0214 17:48:17.373566 1 shared_informer.go:313] Waiting for caches to sync for GC I0214 17:48:17.523333 1 controllermanager.go:765] "Started controller" controller="ttl-controller" I0214 17:48:17.523344 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="service-lb-controller" I0214 17:48:17.523348 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="cloud-node-lifecycle-controller" I0214 17:48:17.523484 1 ttl_controller.go:127] "Starting TTL controller" logger="ttl-controller" I0214 17:48:17.523494 1 shared_informer.go:313] Waiting for caches to sync for TTL I0214 17:48:17.524746 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:48:17.526887 1 shared_informer.go:320] Caches are synced for expand I0214 17:48:17.526911 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"kind-mapt-control-plane\" does not exist" I0214 17:48:17.526981 1 shared_informer.go:320] Caches are synced for node I0214 17:48:17.527032 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller" I0214 17:48:17.527062 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller" I0214 17:48:17.527065 1 shared_informer.go:313] Waiting for caches to sync for cidrallocator I0214 17:48:17.527068 1 shared_informer.go:320] Caches are synced for cidrallocator I0214 17:48:17.529213 1 shared_informer.go:320] Caches are synced for daemon sets I0214 17:48:17.531932 1 shared_informer.go:320] Caches are synced for endpoint_slice I0214 17:48:17.532413 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:48:17.534487 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="kind-mapt-control-plane" podCIDRs=["10.244.0.0/24"] I0214 17:48:17.534500 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:48:17.534515 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:48:17.538934 1 shared_informer.go:320] Caches are synced for ReplicationController I0214 17:48:17.544550 1 shared_informer.go:320] Caches are synced for job I0214 17:48:17.553298 1 shared_informer.go:320] Caches are synced for PVC protection I0214 17:48:17.563921 1 shared_informer.go:320] Caches are synced for TTL after finished I0214 17:48:17.570329 1 shared_informer.go:320] Caches are synced for taint I0214 17:48:17.570366 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone="" I0214 17:48:17.570402 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="kind-mapt-control-plane" I0214 17:48:17.570423 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. 
Entering master disruption mode" logger="node-lifecycle-controller" I0214 17:48:17.570485 1 shared_informer.go:320] Caches are synced for HPA I0214 17:48:17.570598 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:48:17.570603 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller" I0214 17:48:17.570606 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller" I0214 17:48:17.570952 1 shared_informer.go:320] Caches are synced for certificate-csrapproving I0214 17:48:17.574293 1 shared_informer.go:320] Caches are synced for cronjob I0214 17:48:17.574320 1 shared_informer.go:320] Caches are synced for persistent volume I0214 17:48:17.574327 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:48:17.574375 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner I0214 17:48:17.574494 1 shared_informer.go:320] Caches are synced for bootstrap_signer I0214 17:48:17.574498 1 shared_informer.go:320] Caches are synced for PV protection I0214 17:48:17.574517 1 shared_informer.go:320] Caches are synced for ephemeral I0214 17:48:17.574528 1 shared_informer.go:320] Caches are synced for service account I0214 17:48:17.574615 1 shared_informer.go:320] Caches are synced for GC I0214 17:48:17.575318 1 shared_informer.go:320] Caches are synced for crt configmap I0214 17:48:17.575348 1 shared_informer.go:320] Caches are synced for attach detach I0214 17:48:17.592489 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status I0214 17:48:17.603685 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring I0214 17:48:17.613894 1 shared_informer.go:320] Caches are synced for ReplicaSet I0214 17:48:17.620649 1 shared_informer.go:320] Caches are synced for disruption I0214 17:48:17.620677 1 shared_informer.go:320] Caches are synced for taint-eviction-controller I0214 17:48:17.621946 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kube-apiserver-client I0214 17:48:17.622096 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-serving I0214 17:48:17.622124 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-client I0214 17:48:17.622133 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-legacy-unknown I0214 17:48:17.623300 1 shared_informer.go:320] Caches are synced for namespace I0214 17:48:17.623510 1 shared_informer.go:320] Caches are synced for deployment I0214 17:48:17.623533 1 shared_informer.go:320] Caches are synced for TTL I0214 17:48:17.623606 1 shared_informer.go:320] Caches are synced for endpoint I0214 17:48:17.624033 1 shared_informer.go:320] Caches are synced for stateful set I0214 17:48:17.624788 1 shared_informer.go:320] Caches are synced for ClusterRoleAggregator I0214 17:48:17.624887 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:48:17.633184 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:48:18.475739 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:48:18.634630 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="206.08437ms" I0214 17:48:18.637661 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" 
duration="211.042513ms" I0214 17:48:18.640749 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="6.088052ms" I0214 17:48:18.640788 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="23.42µs" I0214 17:48:18.643944 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="6.261182ms" I0214 17:48:18.643992 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="30.966µs" I0214 17:48:18.648166 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="30.793µs" I0214 17:48:30.547506 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:48:30.555441 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:48:30.560133 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="48.59µs" I0214 17:48:30.563107 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="45.724µs" I0214 17:48:30.563222 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="35.627µs" I0214 17:48:30.573809 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="36.229µs" I0214 17:48:30.582941 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="25.396µs" I0214 17:48:30.590372 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="43.814µs" I0214 17:48:32.572977 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. 
Exiting master disruption mode" logger="node-lifecycle-controller" I0214 17:48:35.589372 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="46.21µs" I0214 17:48:35.603551 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="5.641835ms" I0214 17:48:35.603589 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="21.524µs" I0214 17:48:35.612950 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="6.16684ms" I0214 17:48:35.613004 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="31.631µs" I0214 17:48:43.603183 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="80.795µs" I0214 17:48:43.618589 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="7.677227ms" I0214 17:48:43.618652 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="41.834µs" I0214 17:48:44.214747 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:49:10.835839 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="test-pvc-ns" I0214 17:49:14.684290 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:49:14.854054 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="18.322926ms" I0214 17:49:14.861602 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="7.526041ms" I0214 17:49:14.861649 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="28.646µs" I0214 17:49:14.864740 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="29.801µs" I0214 17:49:15.031874 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="15.479285ms" I0214 17:49:15.040579 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="8.679702ms" I0214 17:49:15.040640 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="39.194µs" I0214 17:49:15.042564 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="28.361µs" I0214 17:49:15.213071 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="17.349372ms" I0214 17:49:15.221078 1 replica_set.go:679] 
"Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="7.983316ms" I0214 17:49:15.221128 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="30.421µs" I0214 17:49:15.228168 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="27.868µs" I0214 17:49:15.371429 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" I0214 17:49:15.382608 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:15.390821 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:15.390856 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:15.406873 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:17.630824 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificates.cert-manager.io" I0214 17:49:17.630853 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificaterequests.cert-manager.io" I0214 17:49:17.630867 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="issuers.cert-manager.io" I0214 17:49:17.630882 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="challenges.acme.cert-manager.io" I0214 17:49:17.630904 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="orders.acme.cert-manager.io" I0214 17:49:17.630941 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:49:17.642358 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:49:18.677526 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="8.128548ms" I0214 17:49:18.677590 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-7459f86744" duration="39.657µs" I0214 17:49:18.731880 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:49:18.744187 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:49:19.680918 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="8.19552ms" I0214 17:49:19.680978 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-55949d4888" duration="36.515µs" I0214 17:49:21.676877 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="46.272µs" I0214 17:49:22.679507 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 
17:49:23.687050 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:24.906823 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:49:27.689403 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="8.769017ms" I0214 17:49:27.689460 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-68cbf75c9c" duration="36.582µs" I0214 17:49:30.374537 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="17.586251ms" I0214 17:49:30.385641 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="9.451977ms" I0214 17:49:30.385711 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="52.094µs" I0214 17:49:32.695139 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:33.702561 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:33.786773 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:34.706929 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="62.982µs" I0214 17:49:34.718568 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:34.719004 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="1s" I0214 17:49:36.709898 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="57.699µs" I0214 17:49:44.723324 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="8.48675ms" I0214 17:49:44.723385 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-865b9c84ff" duration="40.841µs" I0214 17:49:45.160850 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:49:48.749477 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:49:49.850329 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:49:50.589915 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="23.925894ms" I0214 17:49:50.597497 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="7.554518ms" I0214 17:49:50.597870 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="348.541µs" I0214 17:49:50.601027 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="85.296µs" I0214 17:49:50.756908 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="15.787042ms" I0214 17:49:50.764651 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="7.718348ms" I0214 17:49:50.764696 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="24.935µs" I0214 17:49:50.767596 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="26.08µs" I0214 17:49:54.753444 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="8.003321ms" I0214 17:49:54.753500 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="28.052µs" I0214 17:49:56.759130 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="8.245248ms" I0214 17:49:56.759179 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="24.758µs" I0214 17:50:15.346659 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="25.618103ms" I0214 17:50:15.354157 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="7.470045ms" I0214 17:50:15.354223 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="46.501µs" I0214 17:50:15.357581 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="37.214µs" I0214 17:50:15.436446 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="15.868411ms" I0214 17:50:15.447132 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="10.661975ms" I0214 17:50:15.447181 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="32.12µs" I0214 17:50:15.453039 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="28.371µs" I0214 17:50:15.535615 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="15.615111ms" I0214 17:50:15.542945 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" 
duration="7.307749ms" I0214 17:50:15.543004 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="44.404µs" I0214 17:50:15.546212 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="35.341µs" I0214 17:50:15.636356 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="15.67397ms" I0214 17:50:15.649070 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="12.690143ms" I0214 17:50:15.666730 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="17.640318ms" I0214 17:50:15.666795 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="46.425µs" I0214 17:50:15.738124 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="16.93432ms" I0214 17:50:15.747212 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="9.064189ms" I0214 17:50:15.747263 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="34.298µs" I0214 17:50:15.751202 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="27.169µs" I0214 17:50:15.956653 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:50:18.738228 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="verificationpolicies.tekton.dev" I0214 17:50:18.738279 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelineruns.tekton.dev" I0214 17:50:18.738300 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="taskruns.tekton.dev" I0214 17:50:18.738313 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="resolutionrequests.resolution.tekton.dev" I0214 17:50:18.738322 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="stepactions.tekton.dev" I0214 17:50:18.738331 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="customruns.tekton.dev" I0214 17:50:18.738343 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelines.tekton.dev" I0214 17:50:18.738360 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="tasks.tekton.dev" I0214 17:50:18.738429 1 shared_informer.go:313] Waiting for caches to sync 
for resource quota I0214 17:50:18.793436 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="79.308µs" I0214 17:50:19.839354 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:50:19.855839 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:50:19.855873 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:50:20.799072 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="59.204µs" I0214 17:50:22.813273 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="10.148081ms" I0214 17:50:22.813359 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="47.105µs" I0214 17:50:24.807525 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="105.339µs" I0214 17:50:26.199545 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:50:26.821331 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="8.567998ms" I0214 17:50:26.821390 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="34.607µs" W0214 17:50:27.597193 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:50:27.605280 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:50:27.957700 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:50:27.966387 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:50:29.811029 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="13.654997ms" I0214 17:50:29.811094 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="39.522µs" I0214 17:50:31.804145 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="5.033208ms" I0214 17:50:31.804221 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="44.527µs" I0214 17:50:35.822643 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="8.566476ms" I0214 17:50:35.822710 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="39.936µs" I0214 17:50:36.492626 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:50:42.332914 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="15.588215ms" I0214 17:50:42.340900 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="7.951763ms" I0214 17:50:42.340957 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="37.868µs" I0214 17:50:42.347044 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="70.43µs" I0214 17:50:42.400715 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="18.927848ms" I0214 17:50:42.408052 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="7.314317ms" I0214 17:50:42.408110 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="29.643µs" I0214 17:50:42.411275 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="55.837µs" I0214 17:50:42.527482 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="11.149898ms" I0214 17:50:42.535085 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="7.580587ms" I0214 17:50:42.535138 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="36.142µs" I0214 17:50:42.538044 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="28.001µs" W0214 17:50:42.607682 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 
17:50:42.608172 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:50:42.970580 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:50:42.971079 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:50:45.864383 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="10.051171ms" I0214 17:50:45.864478 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="63.972µs" I0214 17:50:46.533162 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:50:47.864417 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="7.626606ms" I0214 17:50:47.864507 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="63.853µs" I0214 17:50:49.842429 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="interceptors.triggers.tekton.dev" I0214 17:50:49.842450 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggerbindings.triggers.tekton.dev" I0214 17:50:49.842465 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggers.triggers.tekton.dev" I0214 17:50:49.842478 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="eventlisteners.triggers.tekton.dev" I0214 17:50:49.842484 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggertemplates.triggers.tekton.dev" I0214 17:50:49.842538 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:50:49.860881 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:50:49.942613 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:50:49.961031 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:50:50.865557 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="55.777µs" I0214 17:50:56.863689 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:50:57.613377 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:50:57.614194 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:50:57.974945 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:50:57.975417 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:51:01.879655 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="10.314513ms" I0214 17:51:01.879721 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="39.462µs" I0214 17:51:03.673455 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="18.332262ms" I0214 17:51:03.680917 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="7.430489ms" I0214 17:51:03.680988 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="49.048µs" I0214 17:51:03.683996 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="39.206µs" I0214 17:51:07.907187 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="7.523455ms" I0214 17:51:07.907281 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="70.09µs" I0214 17:51:10.273435 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="15.790323ms" I0214 17:51:10.289617 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="tekton-pipelines/tekton-results-api-546b75cb88" duration="16.147143ms" I0214 17:51:10.289677 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="37.136µs" I0214 17:51:10.373239 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="15.868007ms" I0214 17:51:10.379814 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="6.517172ms" I0214 17:51:10.379902 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="61.024µs" I0214 17:51:10.383152 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="33.165µs" I0214 17:51:10.473208 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="15.598215ms" I0214 17:51:10.478992 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="5.757574ms" I0214 17:51:10.479075 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="38.586µs" I0214 17:51:10.482310 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="40.708µs" W0214 17:51:12.617969 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:12.618497 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:51:12.983905 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:12.984526 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:51:14.927689 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="74.388µs" I0214 17:51:16.935400 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="5.869459ms" I0214 17:51:16.935462 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="37.066µs" I0214 17:51:17.060235 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:51:19.949176 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="10.99056ms" I0214 17:51:19.949239 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="39.241µs" W0214 17:51:27.625227 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:27.625905 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:51:27.988327 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:27.988899 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:51:40.591219 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="65.477µs" I0214 17:51:40.607782 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="8.691137ms" I0214 17:51:40.607849 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="45.927µs" W0214 17:51:42.630749 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:42.631412 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get 
pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:51:42.992929 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:42.993446 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:51:44.725314 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="19.587322ms" I0214 17:51:44.733209 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="7.870089ms" I0214 17:51:44.733285 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="57.281µs" I0214 17:51:44.741957 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="34.357µs" I0214 17:51:46.997504 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="62.174µs" I0214 17:51:47.013379 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="7.866479ms" I0214 17:51:47.013438 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="41.24µs" I0214 17:51:47.313591 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:51:49.948476 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="extensions.dashboard.tekton.dev" I0214 17:51:49.948537 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:51:49.969666 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:51:49.969689 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:51:50.049430 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:51:51.621583 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="14.653895ms" I0214 17:51:51.628642 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="7.006624ms" I0214 17:51:51.628711 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="49.074µs" I0214 17:51:51.634904 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="48.174µs" E0214 
17:51:52.084701 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-edit\", UID:\"d050e523-8771-4599-91a0-f295b1a0e682\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-bp4zz\", UID:\"624d3134-453a-4798-9d96-5f242e8a939f\", Controller:(*bool)(0xc00220f3f7), BlockOwnerDeletion:(*bool)(0xc00220f3f8)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-edit\" not found" logger="UnhandledError" E0214 17:51:52.619883 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" E0214 17:51:52.784299 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"v1\", Kind:\"Service\", Name:\"tekton-dashboard\", UID:\"cec64ba6-c531-4e52-a2a0-29c27e9773a1\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"tekton-pipelines\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{(*garbagecollector.node)(0xc0035e3c20):struct {}{}}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-deployment-nlsxk\", UID:\"f3b45f48-8265-415e-b387-e84deea6659b\", Controller:(*bool)(0xc000425f17), BlockOwnerDeletion:(*bool)(0xc000425f18)}}}: services 
\"tekton-dashboard\" not found" logger="UnhandledError" E0214 17:51:52.809948 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"apps/v1\", Kind:\"Deployment\", Name:\"tekton-dashboard\", UID:\"9e1b53ba-ad9f-430d-8eb3-f48d534dfc6f\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"tekton-pipelines\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{(*garbagecollector.node)(0xc003f4af00):struct {}{}}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-deployment-nlsxk\", UID:\"f3b45f48-8265-415e-b387-e84deea6659b\", Controller:(*bool)(0xc00062963e), BlockOwnerDeletion:(*bool)(0xc00062963f)}}}: deployments.apps \"tekton-dashboard\" not found" logger="UnhandledError" I0214 17:51:52.888476 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="4.44µs" I0214 17:51:53.022064 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="8.708605ms" I0214 17:51:53.022152 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="61.37µs" I0214 17:51:53.047056 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="18.89011ms" I0214 17:51:53.052601 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="5.527295ms" I0214 17:51:53.052648 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="29.856µs" I0214 17:51:53.204384 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="21.290497ms" I0214 17:51:53.212945 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="8.496873ms" I0214 17:51:53.212998 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="37.679µs" I0214 17:51:53.216835 1 replica_set.go:679] 
"Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="41.032µs" I0214 17:51:53.393054 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="63.901µs" W0214 17:51:54.010680 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:51:54.011296 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:51:54.011842 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:51:54.011871 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:51:54.030063 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="10.558933ms" I0214 17:51:54.030132 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="49.284µs" I0214 17:51:54.034497 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="42.096µs" I0214 17:51:54.050801 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="44.455µs" I0214 17:51:54.058983 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="20.526451ms" I0214 17:51:54.065839 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="6.830262ms" I0214 17:51:54.065905 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="40.846µs" I0214 17:51:54.189851 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="57.47µs" I0214 17:51:55.027740 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="69.94µs" I0214 17:51:55.035548 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="51.491µs" I0214 17:51:56.526673 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="18.550999ms" I0214 17:51:56.532491 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="5.783665ms" I0214 17:51:56.532543 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="32.103µs" I0214 17:51:56.535583 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="36.012µs" I0214 17:51:56.701618 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="19.502747ms" I0214 17:51:56.707548 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="5.906728ms" I0214 17:51:56.707592 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="28.94µs" I0214 17:51:56.710365 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="29.145µs" W0214 17:51:56.741745 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:51:56.742096 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:51:56.742403 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:51:56.742418 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:51:56.873594 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="17.377328ms" I0214 17:51:56.891226 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="17.609844ms" I0214 17:51:56.891285 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="41.168µs" W0214 17:51:57.636121 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:57.636805 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:51:57.997234 1 
type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:51:57.997707 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:51:59.344155 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="0s" I0214 17:51:59.349453 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:51:59.357270 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:51:59.357437 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:51:59.367528 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:00.872385 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="22.350929ms" I0214 17:52:00.880425 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="8.009225ms" I0214 17:52:00.880470 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="24.281µs" I0214 17:52:00.884517 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="25.039µs" I0214 17:52:01.043473 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="49.395µs" I0214 17:52:01.060670 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="7.498552ms" I0214 17:52:01.060734 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-5cff8fdbb9" duration="42.237µs" W0214 17:52:01.151404 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:52:01.151987 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:52:01.152419 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:52:01.152438 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list 
*v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:52:02.766002 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="19.183576ms" I0214 17:52:02.773682 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="7.629466ms" I0214 17:52:02.773739 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="36.782µs" I0214 17:52:02.783061 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="18.073µs" I0214 17:52:03.047942 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="56.854µs" I0214 17:52:06.068530 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="8.021153ms" I0214 17:52:06.068609 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-66c64db47f" duration="51.931µs" I0214 17:52:07.821677 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:52:11.531128 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:52:11.531820 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:52:11.532448 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:52:11.532476 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:52:12.641379 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:12.642076 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:52:13.001445 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:13.002081 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:52:14.065781 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="8.449409ms" I0214 17:52:14.065844 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-6f9fd4cbb5" duration="35.509µs" I0214 17:52:18.563303 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:52:19.976005 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:52:20.051888 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="repositories.pipelinesascode.tekton.dev" I0214 17:52:20.051927 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:52:20.051960 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:52:20.076674 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:52:20.097665 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:21.109139 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:23.117899 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="49.218µs" I0214 17:52:24.110609 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:25.116838 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:26.135953 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="8.245165ms" I0214 17:52:26.136001 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-5b599f9d9d" duration="26.716µs" W0214 17:52:26.539927 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:52:26.540816 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:52:26.541383 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:52:26.541407 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:52:26.798899 1 
replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="24.96112ms" I0214 17:52:26.806133 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="7.204086ms" I0214 17:52:26.806194 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="38.72µs" I0214 17:52:26.809276 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="39.877µs" I0214 17:52:27.120401 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0214 17:52:27.646670 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:27.647353 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:52:28.006204 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:28.006743 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:52:28.126746 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:28.696120 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:52:33.147191 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="5.733896ms" I0214 17:52:33.147275 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="52.791µs" I0214 17:52:34.120947 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="5.744839ms" I0214 17:52:34.120986 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-55b8d84497" duration="18.278µs" I0214 17:52:34.340879 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="21.823471ms" I0214 17:52:34.355489 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="kyverno/kyverno-admission-controller-877795fc7" duration="14.574108ms" I0214 17:52:34.355589 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="27.896µs" I0214 17:52:34.360990 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="40.616µs" I0214 17:52:34.396539 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="15.706093ms" I0214 17:52:34.404639 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="8.075778ms" I0214 17:52:34.404703 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="46.27µs" I0214 17:52:34.413326 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="31.057µs" I0214 17:52:34.451188 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="8.316785ms" I0214 17:52:34.451246 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-bfd46c7d6" duration="39.699µs" I0214 17:52:34.510469 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="5.240638ms" I0214 17:52:34.510518 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-7dcdc4cc4d" duration="28.095µs" I0214 17:52:38.156697 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="58.016µs" I0214 17:52:39.790032 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:52:40.165647 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:40.174207 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="74.619µs" I0214 17:52:41.165876 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:41.173163 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:41.186853 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="9.568363ms" I0214 17:52:41.186929 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-5f9f9b856f" duration="50.14µs" I0214 17:52:41.313354 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0214 17:52:42.172628 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" 
delay="1s" I0214 17:52:42.178596 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0214 17:52:42.651992 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:42.652589 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:52:43.010270 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:43.010904 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:52:44.180532 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="63.899µs" I0214 17:52:49.801862 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:52:50.055375 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="signingkeies.dex.coreos.com" I0214 17:52:50.055399 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="offlinesessionses.dex.coreos.com" I0214 17:52:50.055410 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="updaterequests.kyverno.io" I0214 17:52:50.055417 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicerequests.dex.coreos.com" I0214 17:52:50.055427 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.policies.kyverno.io" I0214 17:52:50.055438 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="refreshtokens.dex.coreos.com" I0214 17:52:50.055448 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedvalidatingpolicies.policies.kyverno.io" I0214 17:52:50.055460 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="oauth2clients.dex.coreos.com" I0214 17:52:50.055469 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedmutatingpolicies.policies.kyverno.io" 
I0214 17:52:50.055479 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ephemeralreports.reports.kyverno.io" I0214 17:52:50.055488 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="passwords.dex.coreos.com" I0214 17:52:50.055495 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.kyverno.io" I0214 17:52:50.055503 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedimagevalidatingpolicies.policies.kyverno.io" I0214 17:52:50.055512 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespaceddeletingpolicies.policies.kyverno.io" I0214 17:52:50.055522 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="connectors.dex.coreos.com" I0214 17:52:50.055530 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authrequests.dex.coreos.com" I0214 17:52:50.055544 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedgeneratingpolicies.policies.kyverno.io" I0214 17:52:50.055554 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyreports.wgpolicyk8s.io" I0214 17:52:50.055563 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicetokens.dex.coreos.com" I0214 17:52:50.055571 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authcodes.dex.coreos.com" I0214 17:52:50.055579 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policies.kyverno.io" I0214 17:52:50.055589 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cleanuppolicies.kyverno.io" I0214 17:52:50.055737 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:52:50.084468 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:52:51.156343 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:52:51.184834 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:52:52.667221 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="69.074µs" I0214 17:52:52.683691 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="8.541455ms" I0214 17:52:52.683760 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-877795fc7" duration="48.222µs" W0214 17:52:57.656639 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:57.657220 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number 
of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:52:58.014514 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:52:58.015072 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:52:59.177563 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:52:59.178234 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:52:59.179021 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:52:59.179045 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:52:59.850906 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:53:12.661476 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:12.662141 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:53:13.018801 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:13.019351 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to 
fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:53:18.347123 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="21.873895ms" I0214 17:53:18.352945 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="5.789895ms" I0214 17:53:18.352999 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="30.128µs" I0214 17:53:18.355994 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="20.024µs" I0214 17:53:21.163679 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releases.appstudio.redhat.com" I0214 17:53:21.163718 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentdetectionqueries.appstudio.redhat.com" I0214 17:53:21.163746 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplanadmissions.appstudio.redhat.com" I0214 17:53:21.163763 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargets.appstudio.redhat.com" I0214 17:53:21.163793 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalservicesconfigs.appstudio.redhat.com" I0214 17:53:21.163813 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="components.appstudio.redhat.com" I0214 17:53:21.163846 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshots.appstudio.redhat.com" I0214 17:53:21.163870 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalrequests.appstudio.redhat.com" I0214 17:53:21.163899 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplans.appstudio.redhat.com" I0214 17:53:21.163915 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="applications.appstudio.redhat.com" I0214 17:53:21.163941 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="environments.appstudio.redhat.com" I0214 17:53:21.163967 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseserviceconfigs.appstudio.redhat.com" I0214 17:53:21.163986 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargetclaims.appstudio.redhat.com" I0214 17:53:21.164020 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="promotionruns.appstudio.redhat.com" I0214 17:53:21.164045 1 
resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="enterprisecontractpolicies.appstudio.redhat.com" I0214 17:53:21.164065 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshotenvironmentbindings.appstudio.redhat.com" I0214 17:53:21.164422 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:53:21.192573 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:53:22.265469 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:53:22.271479 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="41.247µs" I0214 17:53:22.293407 1 shared_informer.go:320] Caches are synced for garbage collector W0214 17:53:27.666310 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:27.666869 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:53:28.023991 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:28.024631 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:53:29.401662 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-6d685fcd6f" duration="20.880112ms" I0214 17:53:29.420375 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-6d685fcd6f" duration="18.678374ms" I0214 17:53:29.420428 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-6d685fcd6f" duration="33.659µs" I0214 17:53:30.219416 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:53:33.284373 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="8.277424ms" I0214 17:53:33.284434 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-5b6566c9d6" duration="32.171µs" I0214 17:53:36.302128 1 replica_set.go:679] 
"Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-6d685fcd6f" duration="47.046µs" I0214 17:53:37.573386 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="18.724691ms" I0214 17:53:37.591221 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="17.801006ms" I0214 17:53:37.591295 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="49.141µs" I0214 17:53:37.591354 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="20.749µs" I0214 17:53:40.041830 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="26.053234ms" I0214 17:53:40.047334 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="5.471924ms" I0214 17:53:40.047386 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="24.977µs" I0214 17:53:40.050644 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="28.613µs" I0214 17:53:40.274717 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:53:42.318992 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="45.833µs" W0214 17:53:42.670432 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:42.670982 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:53:42.855477 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:53:42.855978 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:53:42.856512 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:53:42.856535 1 reflector.go:166] "Unhandled Error" 
err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:53:43.031305 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:43.031856 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:53:43.274137 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="20.332166ms" I0214 17:53:43.289074 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="14.909105ms" I0214 17:53:43.289126 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="30.994µs" I0214 17:53:44.325978 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="38.543µs" I0214 17:53:44.344364 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="7.72102ms" I0214 17:53:44.344415 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="28.924µs" I0214 17:53:47.318312 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-6d685fcd6f" duration="11.701195ms" I0214 17:53:47.318366 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-6d685fcd6f" duration="29.875µs" I0214 17:53:50.374669 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:53:52.269774 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentgroups.appstudio.redhat.com" I0214 17:53:52.269812 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="integrationtestscenarios.appstudio.redhat.com" I0214 17:53:52.269860 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:53:52.307616 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0214 17:53:53.308318 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:53:53.331145 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-f5668ffc8" duration="8.137574ms" I0214 17:53:53.331313 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="integration-service/integration-service-controller-manager-f5668ffc8" duration="34.305µs" I0214 17:53:53.370504 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:53:55.362921 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="51.237µs" W0214 17:53:57.675906 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:57.676687 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:53:58.035909 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:53:58.036529 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:53:59.369595 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="71.127µs" I0214 17:54:00.373041 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="116.931µs" I0214 17:54:00.704645 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:54:06.392932 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="78.387µs" W0214 17:54:12.681111 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:12.681827 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:54:13.043264 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:13.043932 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:54:21.074438 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:54:21.648245 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:54:21.649400 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:54:21.650343 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:54:21.650367 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:54:27.686872 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:27.687595 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:54:28.047820 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:28.048541 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:54:31.361563 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:54:34.033320 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="cert-manager/cert-manager-startupapicheck" delay="0s" I0214 17:54:37.407506 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="7.970217ms" I0214 17:54:37.407568 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="40.46µs" I0214 
17:54:41.645487 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:54:42.691414 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:42.691989 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:54:43.056320 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:43.056946 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:54:57.696664 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:57.697283 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:54:58.060907 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:54:58.061533 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:55:04.259156 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:55:04.259835 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:55:04.260409 1 
reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:55:04.260436 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:55:07.416375 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="7.990453ms" I0214 17:55:07.416481 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-85c5f6d4fd" duration="66.778µs" I0214 17:55:12.473184 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:55:12.701135 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:12.701773 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:55:13.068777 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:13.069271 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:55:15.965781 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="19.934896ms" I0214 17:55:15.973624 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="7.810632ms" I0214 17:55:15.973669 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="25.582µs" I0214 17:55:15.987620 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="25.812µs" I0214 17:55:22.668004 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:55:23.323945 1 shared_informer.go:313] Waiting for caches to sync for garbage collector W0214 17:55:23.381295 1 shared_informer.go:597] resyncPeriod 13h14m46.409543803s is smaller than resyncCheckPeriod 17h48m20.982504652s and the 
informer has already started. Changing it to 17h48m20.982504652s I0214 17:55:23.381323 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="imagerepositories.appstudio.redhat.com" I0214 17:55:23.381373 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0214 17:55:24.382118 1 shared_informer.go:320] Caches are synced for resource quota I0214 17:55:24.424625 1 shared_informer.go:320] Caches are synced for garbage collector I0214 17:55:25.584589 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="49.144µs" W0214 17:55:27.705787 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:27.706518 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:55:28.073157 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:28.073916 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:55:36.595175 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="8.039212ms" I0214 17:55:36.595230 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7b8ccd8977" duration="28.194µs" I0214 17:55:37.867615 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-b696cd8bc" duration="18.165465ms" I0214 17:55:37.879946 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-b696cd8bc" duration="12.300065ms" I0214 17:55:37.879996 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-b696cd8bc" duration="29.193µs" I0214 17:55:37.888605 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-b696cd8bc" duration="48.52µs" I0214 17:55:39.624288 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-b696cd8bc" duration="5.829567ms" I0214 17:55:39.624337 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" 
kind="ReplicaSet" key="smee-client/gosmee-client-b696cd8bc" duration="21.959µs" I0214 17:55:39.645851 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="15.93366ms" I0214 17:55:39.656552 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="10.670742ms" I0214 17:55:39.656663 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="32.886µs" I0214 17:55:40.029411 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="47.58µs" I0214 17:55:40.623774 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="54.894µs" I0214 17:55:40.627620 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-64f675999f" duration="33.986µs" W0214 17:55:42.710149 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:42.710764 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:55:43.082198 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:43.082653 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:55:43.116303 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0214 17:55:53.515771 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:55:57.715128 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:57.715753 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" 
logger="UnhandledError" W0214 17:55:58.086813 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:55:58.087690 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:55:58.488867 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:55:58.489523 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:55:58.490056 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:55:58.490078 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0214 17:56:03.679770 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:56:12.720097 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:12.720713 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:13.094657 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:13.095274 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:27.725227 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:27.725877 1 
horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:28.104701 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:28.105348 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:30.140407 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:56:30.141137 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:56:30.141805 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:56:30.141827 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:56:42.730507 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:42.731169 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:43.111973 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:43.112552 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could 
not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:57.735910 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:57.736950 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:56:58.115893 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:56:58.116476 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:01.276126 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:57:01.276994 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:57:01.277534 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:57:01.277556 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:57:12.741008 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:12.741666 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:13.123820 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:13.124385 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas 
based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:27.745675 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:27.746451 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:28.128440 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:28.129042 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:42.750835 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:42.751423 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:43.136883 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:43.137423 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:47.571294 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested 
resource E0214 17:57:47.571934 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:57:47.572629 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:57:47.572651 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:57:57.755383 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:57.756027 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:57:58.140712 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:57:58.141335 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:58:05.897347 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:58:12.760008 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:58:12.760668 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:58:13.149155 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:58:13.149667 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: 
failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0214 17:58:15.909966 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0214 17:58:22.445831 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0214 17:58:22.446644 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0214 17:58:22.447377 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0214 17:58:22.447398 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0214 17:58:27.765292 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:58:27.766177 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:58:28.152905 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:58:28.153419 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:58:42.770159 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0214 17:58:42.770696 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0214 17:58:43.159886 1 type.go:183] The watchlist 
request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:58:43.160476 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:58:57.775641 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:58:57.776319 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:58:58.164350 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:58:58.165029 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:01.329670 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0214 17:59:01.330482 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0214 17:59:01.331106 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0214 17:59:01.331133 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0214 17:59:12.780586 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:12.781140 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:13.171719 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:13.172276 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:27.785274 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:27.785910 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:28.177232 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:28.177887 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:42.797015 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:42.797974 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:43.184924 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:43.185546 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:47.745408 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0214 17:59:47.746070 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0214 17:59:47.746520 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0214 17:59:47.746545 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0214 17:59:57.802344 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:57.802941 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 17:59:58.189890 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 17:59:58.190577 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0214 18:00:00.109785 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="0s"
I0214 18:00:00.123652 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:00.131958 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:00.132083 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:00.148310 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:01.251613 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:02.342524 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:03.349405 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
I0214 18:00:03.355281 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="integration-service/integration-service-snapshot-garbage-collector-29518200" delay="1s"
W0214 18:00:12.807548 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:12.808166 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:13.198059 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:13.198815 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:21.672749 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0214 18:00:21.673429 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0214 18:00:21.673978 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0214 18:00:21.674003 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0214 18:00:27.812166 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:27.812949 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:28.203644 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:28.204323 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:42.817319 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:42.818002 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:43.211781 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:43.212309 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:57.822183 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:57.822666 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:00:58.216768 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:00:58.217347 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0214 18:00:59.680058 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
W0214 18:01:09.146465 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0214 18:01:09.147297 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0214 18:01:09.148106 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0214 18:01:09.148132 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0214 18:01:12.827439 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:12.828126 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:01:13.224894 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:13.225521 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0214 18:01:19.870602 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
W0214 18:01:27.833356 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:27.834080 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:01:28.228993 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:28.229634 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0214 18:01:40.209519 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
W0214 18:01:42.838569 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:42.839195 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:01:43.237325 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:43.237852 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0214 18:01:50.614755 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
W0214 18:01:57.844268 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:57.844976 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:01:58.241461 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:01:58.242014 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:02:03.295294 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0214 18:02:03.296035 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0214 18:02:03.296738 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0214 18:02:03.296767 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
W0214 18:02:12.849086 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:02:12.849748 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:02:13.249755 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:02:13.250468 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:02:27.854090 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:02:27.854801 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:02:28.254640 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:02:28.255210 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0214 18:02:31.363312 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0214 18:02:41.561943 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
W0214 18:02:42.858851 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:02:42.859506 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:02:43.261996 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0214 18:02:43.262513 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0214 18:02:51.647485 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource
E0214 18:02:51.648547 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions"
W0214 18:02:51.649151 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource
E0214 18:02:51.649178 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError"
I0214 18:02:51.772332 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"