I0127 14:11:24.107843 1 serving.go:386] Generated self-signed cert in-memory
I0127 14:11:24.370030 1 controllermanager.go:185] "Starting" version="v1.32.5"
I0127 14:11:24.370044 1 controllermanager.go:187] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
I0127 14:11:24.371052 1 dynamic_cafile_content.go:161] "Starting controller" name="request-header::/etc/kubernetes/pki/front-proxy-ca.crt"
I0127 14:11:24.371053 1 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/pki/ca.crt"
I0127 14:11:24.371334 1 secure_serving.go:213] Serving securely on 127.0.0.1:10257
I0127 14:11:24.371345 1 tlsconfig.go:243] "Starting DynamicServingCertificateController"
I0127 14:11:24.371702 1 leaderelection.go:257] attempting to acquire leader lease kube-system/kube-controller-manager...
E0127 14:11:25.709801 1 leaderelection.go:436] error retrieving resource lock kube-system/kube-controller-manager: leases.coordination.k8s.io "kube-controller-manager" is forbidden: User "system:kube-controller-manager" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "kube-system"
I0127 14:11:28.136592 1 leaderelection.go:271] successfully acquired lease kube-system/kube-controller-manager
I0127 14:11:28.136668 1 event.go:389] "Event occurred" object="kube-system/kube-controller-manager" fieldPath="" kind="Lease" apiVersion="coordination.k8s.io/v1" type="Normal" reason="LeaderElection" message="kind-mapt-control-plane_6f51181a-ebcb-4961-bec9-973c1a91c797 became leader"
I0127 14:11:28.137921 1 controllermanager.go:765] "Started controller" controller="serviceaccount-token-controller"
I0127 14:11:28.137984 1 shared_informer.go:313] Waiting for caches to sync for tokens
I0127 14:11:28.151693 1 controllermanager.go:765] "Started controller" controller="ephemeral-volume-controller"
I0127 14:11:28.151720 1 controller.go:173] "Starting ephemeral volume controller" logger="ephemeral-volume-controller"
I0127 14:11:28.151730 1 shared_informer.go:313] Waiting for caches to sync for ephemeral
I0127 14:11:28.167491 1 controllermanager.go:765] "Started controller" controller="validatingadmissionpolicy-status-controller"
I0127 14:11:28.167597 1 shared_informer.go:313] Waiting for caches to sync for validatingadmissionpolicy-status
I0127 14:11:28.173697 1 controllermanager.go:765] "Started controller" controller="endpointslice-controller"
I0127 14:11:28.173822 1 endpointslice_controller.go:281] "Starting endpoint slice controller" logger="endpointslice-controller"
I0127 14:11:28.173831 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice
I0127 14:11:28.179374 1 controllermanager.go:765] "Started controller" controller="serviceaccount-controller"
I0127 14:11:28.179429 1 serviceaccounts_controller.go:114] "Starting service account controller" logger="serviceaccount-controller"
I0127 14:11:28.179439 1 shared_informer.go:313] Waiting for caches to sync for service account
I0127 14:11:28.186339 1 controllermanager.go:765] "Started controller" controller="garbage-collector-controller"
I0127 14:11:28.186390 1 garbagecollector.go:144] "Starting controller" logger="garbage-collector-controller" controller="garbagecollector"
I0127 14:11:28.186409 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0127 14:11:28.186429 1 graph_builder.go:351] "Running" logger="garbage-collector-controller" component="GraphBuilder"
I0127 14:11:28.191882 1 controllermanager.go:765] "Started controller" controller="bootstrap-signer-controller"
I0127 14:11:28.191962 1 shared_informer.go:313] Waiting for caches to sync for bootstrap_signer
I0127 14:11:28.198147 1 controllermanager.go:765] "Started controller" controller="persistentvolume-attach-detach-controller"
I0127 14:11:28.198205 1 attach_detach_controller.go:338] "Starting attach detach controller" logger="persistentvolume-attach-detach-controller"
I0127 14:11:28.198215 1 shared_informer.go:313] Waiting for caches to sync for attach detach
I0127 14:11:28.203768 1 controllermanager.go:765] "Started controller" controller="job-controller"
I0127 14:11:28.203883 1 job_controller.go:243] "Starting job controller" logger="job-controller"
I0127 14:11:28.203892 1 shared_informer.go:313] Waiting for caches to sync for job
I0127 14:11:28.238761 1 shared_informer.go:320] Caches are synced for tokens
I0127 14:11:28.292526 1 controllermanager.go:765] "Started controller" controller="deployment-controller"
I0127 14:11:28.292636 1 deployment_controller.go:173] "Starting controller" logger="deployment-controller" controller="deployment"
I0127 14:11:28.292645 1 shared_informer.go:313] Waiting for caches to sync for deployment
I0127 14:11:28.442832 1 node_lifecycle_controller.go:432] "Controller will reconcile labels" logger="node-lifecycle-controller"
I0127 14:11:28.442869 1 controllermanager.go:765] "Started controller" controller="node-lifecycle-controller"
I0127 14:11:28.442913 1 node_lifecycle_controller.go:466] "Sending events to api server" logger="node-lifecycle-controller"
I0127 14:11:28.442926 1 node_lifecycle_controller.go:477] "Starting node controller" logger="node-lifecycle-controller"
I0127 14:11:28.442935 1 shared_informer.go:313] Waiting for caches to sync for taint
I0127 14:11:28.592206 1 controllermanager.go:765] "Started controller" controller="persistentvolume-protection-controller"
I0127 14:11:28.592221 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="service-cidr-controller" requiredFeatureGates=["MultiCIDRServiceAllocator"]
I0127 14:11:28.592243 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="storageversion-garbage-collector-controller" requiredFeatureGates=["APIServerIdentity","StorageVersionAPI"]
I0127 14:11:28.592275 1 pv_protection_controller.go:81] "Starting PV protection controller" logger="persistentvolume-protection-controller"
I0127 14:11:28.592285 1 shared_informer.go:313] Waiting for caches to sync for PV protection
I0127 14:11:28.742638 1 controllermanager.go:765] "Started controller" controller="legacy-serviceaccount-token-cleaner-controller"
I0127 14:11:28.742678 1 legacy_serviceaccount_token_cleaner.go:103] "Starting legacy service account token cleaner controller" logger="legacy-serviceaccount-token-cleaner-controller"
I0127 14:11:28.742687 1 shared_informer.go:313] Waiting for caches to sync for legacy-service-account-token-cleaner
I0127 14:11:28.789128 1 controllermanager.go:765] "Started controller" controller="taint-eviction-controller"
I0127 14:11:28.789169 1 taint_eviction.go:281] "Starting" logger="taint-eviction-controller" controller="taint-eviction-controller"
I0127 14:11:28.789191 1 taint_eviction.go:287] "Sending events to api server" logger="taint-eviction-controller"
I0127 14:11:28.789209 1 shared_informer.go:313] Waiting for caches to sync for taint-eviction-controller
I0127 14:11:28.942328 1 controllermanager.go:765] "Started controller" controller="ttl-controller"
I0127 14:11:28.942343 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="cloud-node-lifecycle-controller"
I0127 14:11:28.942390 1 ttl_controller.go:127] "Starting TTL controller" logger="ttl-controller"
I0127 14:11:28.942399 1 shared_informer.go:313] Waiting for caches to sync for TTL
I0127 14:11:29.092480 1 controllermanager.go:765] "Started controller" controller="clusterrole-aggregation-controller"
I0127 14:11:29.092572 1 clusterroleaggregation_controller.go:194] "Starting ClusterRoleAggregator controller" logger="clusterrole-aggregation-controller"
I0127 14:11:29.092594 1 shared_informer.go:313] Waiting for caches to sync for ClusterRoleAggregator
I0127 14:11:29.243112 1 controllermanager.go:765] "Started controller" controller="root-ca-certificate-publisher-controller"
I0127 14:11:29.243135 1 publisher.go:107] "Starting root CA cert publisher controller" logger="root-ca-certificate-publisher-controller"
I0127 14:11:29.243136 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="kube-apiserver-serving-clustertrustbundle-publisher-controller" requiredFeatureGates=["ClusterTrustBundle"]
I0127 14:11:29.243147 1 shared_informer.go:313] Waiting for caches to sync for crt configmap
I0127 14:11:29.392563 1 controllermanager.go:765] "Started controller" controller="daemonset-controller"
I0127 14:11:29.392685 1 daemon_controller.go:294] "Starting daemon sets controller" logger="daemonset-controller"
I0127 14:11:29.392696 1 shared_informer.go:313] Waiting for caches to sync for daemon sets
I0127 14:11:29.688795 1 controllermanager.go:765] "Started controller" controller="horizontal-pod-autoscaler-controller"
I0127 14:11:29.688832 1 horizontal.go:201] "Starting HPA controller" logger="horizontal-pod-autoscaler-controller"
I0127 14:11:29.688840 1 shared_informer.go:313] Waiting for caches to sync for HPA
I0127 14:11:29.840708 1 controllermanager.go:765] "Started controller" controller="cronjob-controller"
I0127 14:11:29.840806 1 cronjob_controllerv2.go:145] "Starting cronjob controller v2" logger="cronjob-controller"
I0127 14:11:29.840818 1 shared_informer.go:313] Waiting for caches to sync for cronjob
I0127 14:11:29.990544 1 controllermanager.go:765] "Started controller" controller="persistentvolume-expander-controller"
I0127 14:11:29.990557 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="volumeattributesclass-protection-controller" requiredFeatureGates=["VolumeAttributesClass"]
I0127 14:11:29.990598 1 expand_controller.go:329] "Starting expand controller" logger="persistentvolume-expander-controller"
I0127 14:11:29.990607 1 shared_informer.go:313] Waiting for caches to sync for expand
I0127 14:11:30.141926 1 controllermanager.go:765] "Started controller" controller="replicationcontroller-controller"
I0127 14:11:30.141980 1 replica_set.go:217] "Starting controller" logger="replicationcontroller-controller" name="replicationcontroller"
I0127 14:11:30.141990 1 shared_informer.go:313] Waiting for caches to sync for ReplicationController
I0127 14:11:30.291506 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-cleaner-controller"
I0127 14:11:30.291556 1 cleaner.go:83] "Starting CSR cleaner controller" logger="certificatesigningrequest-cleaner-controller"
I0127 14:11:30.441515 1 range_allocator.go:112] "No Secondary Service CIDR provided. Skipping filtering out secondary service addresses" logger="node-ipam-controller"
I0127 14:11:30.441541 1 controllermanager.go:765] "Started controller" controller="node-ipam-controller"
I0127 14:11:30.441546 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="service-lb-controller"
I0127 14:11:30.441549 1 controllermanager.go:723] "Skipping a cloud provider controller" controller="node-route-controller"
I0127 14:11:30.441834 1 node_ipam_controller.go:141] "Starting ipam controller" logger="node-ipam-controller"
I0127 14:11:30.441847 1 shared_informer.go:313] Waiting for caches to sync for node
I0127 14:11:30.745059 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpointslices.discovery.k8s.io"
I0127 14:11:30.745105 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cronjobs.batch"
I0127 14:11:30.745137 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ingresses.networking.k8s.io"
I0127 14:11:30.745164 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="csistoragecapacities.storage.k8s.io"
I0127 14:11:30.745183 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="statefulsets.apps"
I0127 14:11:30.745206 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="horizontalpodautoscalers.autoscaling"
I0127 14:11:30.745230 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="poddisruptionbudgets.policy"
I0127 14:11:30.745245 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="rolebindings.rbac.authorization.k8s.io"
I0127 14:11:30.745266 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="endpoints"
I0127 14:11:30.745287 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deployments.apps"
I0127 14:11:30.745302 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="replicasets.apps"
I0127 14:11:30.745314 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="roles.rbac.authorization.k8s.io"
I0127 14:11:30.745324 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="leases.coordination.k8s.io"
I0127 14:11:30.745340 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="podtemplates"
I0127 14:11:30.745350 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="daemonsets.apps"
I0127 14:11:30.745364 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="limitranges"
I0127 14:11:30.745376 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="controllerrevisions.apps"
W0127 14:11:30.745407 1 shared_informer.go:597] resyncPeriod 13h20m30.609031976s is smaller than resyncCheckPeriod 17h33m21.691775592s and the informer has already started. Changing it to 17h33m21.691775592s
I0127 14:11:30.745442 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="serviceaccounts"
I0127 14:11:30.745460 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="jobs.batch"
I0127 14:11:30.745490 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="networkpolicies.networking.k8s.io"
I0127 14:11:30.745512 1 controllermanager.go:765] "Started controller" controller="resourcequota-controller"
I0127 14:11:30.745564 1 resource_quota_controller.go:300] "Starting resource quota controller" logger="resourcequota-controller"
I0127 14:11:30.745577 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0127 14:11:30.745594 1 resource_quota_monitor.go:308] "QuotaMonitor running" logger="resourcequota-controller"
I0127 14:11:30.995630 1 controllermanager.go:765] "Started controller" controller="namespace-controller"
I0127 14:11:30.995669 1 namespace_controller.go:202] "Starting namespace controller" logger="namespace-controller"
I0127 14:11:30.995678 1 shared_informer.go:313] Waiting for caches to sync for namespace
I0127 14:11:31.039229 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-approving-controller"
I0127 14:11:31.039268 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-approving-controller" name="csrapproving"
I0127 14:11:31.039278 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrapproving
I0127 14:11:31.192211 1 controllermanager.go:765] "Started controller" controller="ttl-after-finished-controller"
I0127 14:11:31.192225 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="resourceclaim-controller" requiredFeatureGates=["DynamicResourceAllocation"]
I0127 14:11:31.192268 1 ttlafterfinished_controller.go:112] "Starting TTL after finished controller" logger="ttl-after-finished-controller"
I0127 14:11:31.192279 1 shared_informer.go:313] Waiting for caches to sync for TTL after finished
I0127 14:11:31.343150 1 controllermanager.go:765] "Started controller" controller="persistentvolumeclaim-protection-controller"
I0127 14:11:31.343196 1 pvc_protection_controller.go:168] "Starting PVC protection controller" logger="persistentvolumeclaim-protection-controller"
I0127 14:11:31.343206 1 shared_informer.go:313] Waiting for caches to sync for PVC protection
I0127 14:11:31.491944 1 controllermanager.go:765] "Started controller" controller="endpoints-controller"
I0127 14:11:31.492014 1 endpoints_controller.go:182] "Starting endpoint controller" logger="endpoints-controller"
I0127 14:11:31.492027 1 shared_informer.go:313] Waiting for caches to sync for endpoint
I0127 14:11:31.688115 1 controllermanager.go:765] "Started controller" controller="disruption-controller"
I0127 14:11:31.688155 1 disruption.go:452] "Sending events to api server." logger="disruption-controller"
I0127 14:11:31.688182 1 disruption.go:463] "Starting disruption controller" logger="disruption-controller"
I0127 14:11:31.688190 1 shared_informer.go:313] Waiting for caches to sync for disruption
I0127 14:11:31.738928 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-serving"
I0127 14:11:31.738951 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-serving
I0127 14:11:31.738967 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0127 14:11:31.739099 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kubelet-client"
I0127 14:11:31.739109 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kubelet-client
I0127 14:11:31.739124 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0127 14:11:31.739225 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-kube-apiserver-client"
I0127 14:11:31.739233 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-kube-apiserver-client
I0127 14:11:31.739245 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0127 14:11:31.739370 1 controllermanager.go:765] "Started controller" controller="certificatesigningrequest-signing-controller"
I0127 14:11:31.739399 1 certificate_controller.go:120] "Starting certificate controller" logger="certificatesigningrequest-signing-controller" name="csrsigning-legacy-unknown"
I0127 14:11:31.739406 1 shared_informer.go:313] Waiting for caches to sync for certificate-csrsigning-legacy-unknown
I0127 14:11:31.739416 1 dynamic_serving_content.go:135] "Starting controller" name="csr-controller::/etc/kubernetes/pki/ca.crt::/etc/kubernetes/pki/ca.key"
I0127 14:11:31.895906 1 controllermanager.go:765] "Started controller" controller="token-cleaner-controller"
I0127 14:11:31.895926 1 controllermanager.go:743] "Warning: skipping controller" controller="storage-version-migrator-controller"
I0127 14:11:31.895932 1 controllermanager.go:717] "Controller is disabled by a feature gate" controller="selinux-warning-controller" requiredFeatureGates=["SELinuxChangePolicy"]
I0127 14:11:31.895961 1 tokencleaner.go:117] "Starting token cleaner controller" logger="token-cleaner-controller"
I0127 14:11:31.895980 1 shared_informer.go:313] Waiting for caches to sync for token_cleaner
I0127 14:11:31.895988 1 shared_informer.go:320] Caches are synced for token_cleaner
I0127 14:11:32.043166 1 controllermanager.go:765] "Started controller" controller="endpointslice-mirroring-controller"
I0127 14:11:32.043231 1 endpointslicemirroring_controller.go:227] "Starting EndpointSliceMirroring controller" logger="endpointslice-mirroring-controller"
I0127 14:11:32.043242 1 shared_informer.go:313] Waiting for caches to sync for endpoint_slice_mirroring
I0127 14:11:32.194267 1 controllermanager.go:765] "Started controller" controller="pod-garbage-collector-controller"
I0127 14:11:32.194308 1 gc_controller.go:99] "Starting GC controller" logger="pod-garbage-collector-controller"
I0127 14:11:32.194317 1 shared_informer.go:313] Waiting for caches to sync for GC
I0127 14:11:32.343505 1 controllermanager.go:765] "Started controller" controller="replicaset-controller"
I0127 14:11:32.343570 1 replica_set.go:217] "Starting controller" logger="replicaset-controller" name="replicaset"
I0127 14:11:32.343580 1 shared_informer.go:313] Waiting for caches to sync for ReplicaSet
I0127 14:11:32.492793 1 controllermanager.go:765] "Started controller" controller="statefulset-controller"
I0127 14:11:32.492902 1 stateful_set.go:166] "Starting stateful set controller" logger="statefulset-controller"
I0127 14:11:32.492911 1 shared_informer.go:313] Waiting for caches to sync for stateful set
I0127 14:11:32.643084 1 controllermanager.go:765] "Started controller" controller="persistentvolume-binder-controller"
I0127 14:11:32.643219 1 pv_controller_base.go:308] "Starting persistent volume controller" logger="persistentvolume-binder-controller"
I0127 14:11:32.643231 1 shared_informer.go:313] Waiting for caches to sync for persistent volume
I0127 14:11:32.644694 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0127 14:11:32.648536 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0127 14:11:32.650163 1 actual_state_of_world.go:541] "Failed to update statusUpdateNeeded field in actual state of world" logger="persistentvolume-attach-detach-controller" err="Failed to set statusUpdateNeeded to needed true, because nodeName=\"kind-mapt-control-plane\" does not exist"
I0127 14:11:32.651987 1 shared_informer.go:320] Caches are synced for ephemeral
I0127 14:11:32.667645 1 shared_informer.go:320] Caches are synced for validatingadmissionpolicy-status
I0127 14:11:32.674211 1 shared_informer.go:320] Caches are synced for endpoint_slice
I0127 14:11:32.680463 1 shared_informer.go:320] Caches are synced for service account
I0127 14:11:32.686687 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 14:11:32.686698 1 garbagecollector.go:154] "Garbage collector: all resource monitors have synced" logger="garbage-collector-controller"
I0127 14:11:32.686704 1 garbagecollector.go:157] "Proceeding to collect garbage" logger="garbage-collector-controller"
I0127 14:11:32.689094 1 shared_informer.go:320] Caches are synced for disruption
I0127 14:11:32.689176 1 shared_informer.go:320] Caches are synced for HPA
I0127 14:11:32.689257 1 shared_informer.go:320] Caches are synced for taint-eviction-controller
I0127 14:11:32.691517 1 shared_informer.go:320] Caches are synced for expand
I0127 14:11:32.692660 1 shared_informer.go:320] Caches are synced for TTL after finished
I0127 14:11:32.692673 1 shared_informer.go:320] Caches are synced for deployment
I0127 14:11:32.692686 1 shared_informer.go:320] Caches are synced for PV protection
I0127 14:11:32.692695 1 shared_informer.go:320] Caches are synced for ClusterRoleAggregator
I0127 14:11:32.692688 1 shared_informer.go:320] Caches are synced for endpoint
I0127 14:11:32.692697 1 shared_informer.go:320] Caches are synced for bootstrap_signer
I0127 14:11:32.692822 1 shared_informer.go:320] Caches are synced for daemon sets
I0127 14:11:32.692934 1 shared_informer.go:320] Caches are synced for stateful set
I0127 14:11:32.695053 1 shared_informer.go:320] Caches are synced for GC
I0127 14:11:32.696156 1 shared_informer.go:320] Caches are synced for namespace
I0127 14:11:32.698374 1 shared_informer.go:320] Caches are synced for attach detach
I0127 14:11:32.704633 1 shared_informer.go:320] Caches are synced for job
I0127 14:11:32.739416 1 shared_informer.go:320] Caches are synced for certificate-csrapproving
I0127 14:11:32.739425 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kube-apiserver-client
I0127 14:11:32.739466 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-serving
I0127 14:11:32.739464 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-kubelet-client
I0127 14:11:32.739475 1 shared_informer.go:320] Caches are synced for certificate-csrsigning-legacy-unknown
I0127 14:11:32.741682 1 shared_informer.go:320] Caches are synced for cronjob
I0127 14:11:32.742839 1 shared_informer.go:320] Caches are synced for legacy-service-account-token-cleaner
I0127 14:11:32.742845 1 shared_informer.go:320] Caches are synced for node
I0127 14:11:32.742852 1 shared_informer.go:320] Caches are synced for TTL
I0127 14:11:32.742881 1 shared_informer.go:320] Caches are synced for ReplicationController
I0127 14:11:32.742899 1 range_allocator.go:177] "Sending events to api server" logger="node-ipam-controller"
I0127 14:11:32.742935 1 range_allocator.go:183] "Starting range CIDR allocator" logger="node-ipam-controller"
I0127 14:11:32.742940 1 shared_informer.go:313] Waiting for caches to sync for cidrallocator
I0127 14:11:32.742945 1 shared_informer.go:320] Caches are synced for cidrallocator
I0127 14:11:32.743031 1 shared_informer.go:320] Caches are synced for taint
I0127 14:11:32.743078 1 node_lifecycle_controller.go:1234] "Initializing eviction metric for zone" logger="node-lifecycle-controller" zone=""
I0127 14:11:32.743125 1 node_lifecycle_controller.go:886] "Missing timestamp for Node. Assuming now as a timestamp" logger="node-lifecycle-controller" node="kind-mapt-control-plane"
I0127 14:11:32.743154 1 node_lifecycle_controller.go:1038] "Controller detected that all Nodes are not-Ready. Entering master disruption mode" logger="node-lifecycle-controller"
I0127 14:11:32.743180 1 shared_informer.go:320] Caches are synced for crt configmap
I0127 14:11:32.743259 1 shared_informer.go:320] Caches are synced for persistent volume
I0127 14:11:32.743270 1 shared_informer.go:320] Caches are synced for PVC protection
I0127 14:11:32.743262 1 shared_informer.go:320] Caches are synced for endpoint_slice_mirroring
I0127 14:11:32.744186 1 shared_informer.go:320] Caches are synced for ReplicaSet
I0127 14:11:32.745252 1 shared_informer.go:320] Caches are synced for resource quota
I0127 14:11:32.746334 1 shared_informer.go:320] Caches are synced for resource quota
I0127 14:11:32.750036 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 14:11:32.750987 1 range_allocator.go:428] "Set node PodCIDR" logger="node-ipam-controller" node="kind-mapt-control-plane" podCIDRs=["10.244.0.0/24"]
I0127 14:11:32.751007 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:32.751028 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:33.645943 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:33.763664 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="314.357785ms"
I0127 14:11:33.763675 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="317.451624ms"
I0127 14:11:33.771208 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="7.470875ms"
I0127 14:11:33.771227 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="7.526985ms"
I0127 14:11:33.771261 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="22.131µs"
I0127 14:11:33.771266 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="38.651µs"
I0127 14:11:33.779247 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="29.1µs"
I0127 14:11:41.059468 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:45.707998 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:45.714960 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:45.720027 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="33.21µs"
I0127 14:11:45.723107 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="45.851µs"
I0127 14:11:45.723134 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="14.13µs"
I0127 14:11:45.733788 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="23.1µs"
I0127 14:11:45.742656 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="38.601µs"
I0127 14:11:45.749421 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="30.19µs"
I0127 14:11:47.745310 1 node_lifecycle_controller.go:1057] "Controller detected that some Nodes are Ready. Exiting master disruption mode" logger="node-lifecycle-controller"
I0127 14:11:50.902625 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="50.991µs"
I0127 14:11:51.324253 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:11:51.902623 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="5.860315ms"
I0127 14:11:51.902704 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="local-path-storage/local-path-provisioner-7dc846544d" duration="52.421µs"
I0127 14:11:51.913053 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="7.409064ms"
I0127 14:11:51.913089 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="20.44µs"
I0127 14:11:51.921784 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="25.25µs"
I0127 14:11:52.908767 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="8.335079ms"
I0127 14:11:52.908827 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kube-system/coredns-668d6bf9bc" duration="41.421µs"
I0127 14:12:11.843103 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:12:21.941614 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:12:25.420107 1 namespace_controller.go:187] "Namespace has been deleted" logger="namespace-controller" namespace="test-pvc-ns"
I0127 14:12:27.773532 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-66d46f75d6" duration="18.931643ms"
I0127 14:12:27.780521 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-66d46f75d6" duration="6.959811ms"
I0127 14:12:27.780574 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-66d46f75d6" duration="34.031µs"
I0127 14:12:27.783603 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-66d46f75d6" duration="26.561µs"
I0127 14:12:27.947495 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-856bdc4b95" duration="15.581879ms"
I0127 14:12:27.955964 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-856bdc4b95" duration="8.438325ms"
I0127 14:12:27.956054 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-856bdc4b95" duration="36.8µs"
I0127 14:12:27.958565 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-856bdc4b95" duration="30.25µs"
I0127 14:12:28.131501 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="17.780702ms"
I0127 14:12:28.139882 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="8.355235ms"
I0127 14:12:28.139936 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="31.241µs"
I0127 14:12:28.144179 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="43.951µs"
I0127 14:12:31.974066 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-66d46f75d6" duration="8.474156ms"
I0127 14:12:31.974136 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-66d46f75d6" duration="47.681µs"
I0127 14:12:32.069268 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:12:32.751297 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificaterequests.cert-manager.io"
I0127 14:12:32.751318 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="challenges.acme.cert-manager.io"
I0127 14:12:32.751334 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="orders.acme.cert-manager.io"
I0127 14:12:32.751377 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="issuers.cert-manager.io"
I0127 14:12:32.751397 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="certificates.cert-manager.io"
I0127 14:12:32.751437 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0127 14:12:32.758626 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0127 14:12:32.851956 1 shared_informer.go:320] Caches are synced for resource quota
I0127 14:12:32.859008 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 14:12:33.975727 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-856bdc4b95" duration="5.428205ms"
I0127 14:12:33.975815 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-cainjector-856bdc4b95" duration="43.901µs"
I0127 14:12:35.976001 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="47.871µs"
I0127 14:12:41.986682 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="8.168593ms"
I0127 14:12:41.986756 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/cert-manager-webhook-7fdfc5cd79" duration="29.19µs"
I0127 14:12:44.898517 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="16.071903ms"
I0127 14:12:44.906466 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="7.9262ms"
I0127 14:12:44.906503 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="24.97µs"
I0127 14:12:44.909622 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="39.211µs"
I0127 14:12:55.007165 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="46.06µs"
I0127 14:12:58.014995 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="74.5µs"
I0127 14:13:02.660376 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:13:02.863472 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0127 14:13:02.963775 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 14:13:06.028796 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="8.090821ms"
I0127 14:13:06.028868 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="cert-manager/trust-manager-7c9f8b8f7d" duration="47.621µs"
I0127 14:13:12.589807 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="19.253824ms"
I0127 14:13:12.596122 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="6.292557ms"
I0127 14:13:12.596159 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="20.41µs"
I0127 14:13:12.599486 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="19.47µs"
I0127 14:13:12.788008 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="15.519327ms"
I0127 14:13:12.795787 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="7.759848ms"
I0127 14:13:12.795819 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="19.37µs"
I0127 14:13:12.799133 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="24.93µs"
I0127 14:13:16.049067 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="4.895536ms"
I0127 14:13:16.049107 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-864c79545c" duration="22.64µs"
I0127 14:13:18.053956 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="4.835275ms"
I0127 14:13:18.054001 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-operator/tekton-operator-webhook-b678db645" duration="23.3µs"
E0127 14:13:27.911925 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: a1d51a98-2186-4188-b1b8-46ba216de907]"
E0127 14:13:27.917244 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: a1d51a98-2186-4188-b1b8-46ba216de907]"
E0127 14:13:27.928196 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: a1d51a98-2186-4188-b1b8-46ba216de907]"
E0127 14:13:27.948646 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: a1d51a98-2186-4188-b1b8-46ba216de907]"
E0127 14:13:27.988967 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: a1d51a98-2186-4188-b1b8-46ba216de907]"
E0127 14:13:28.069131 1 garbagecollector.go:358] "error syncing item" err="unable to get REST mapping for operator.tekton.dev/v1alpha1/TektonConfig." logger="garbage-collector-controller" item="[operator.tekton.dev/v1alpha1/TektonConfig, namespace: , name: config, uid: a1d51a98-2186-4188-b1b8-46ba216de907]"
I0127 14:13:32.857920 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelineruns.tekton.dev"
I0127 14:13:32.857944 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="stepactions.tekton.dev"
I0127 14:13:32.857957 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="pipelines.tekton.dev"
I0127 14:13:32.857970 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="resolutionrequests.resolution.tekton.dev"
I0127 14:13:32.857981 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="verificationpolicies.tekton.dev"
I0127 14:13:32.857995 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="customruns.tekton.dev"
I0127 14:13:32.858006 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="tasks.tekton.dev"
I0127 14:13:32.858016 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="taskruns.tekton.dev"
I0127 14:13:32.858069 1 shared_informer.go:313] Waiting for caches to sync for resource quota
I0127 14:13:32.970250 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
I0127 14:13:33.155772 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:13:33.971360 1 shared_informer.go:320] Caches are synced for garbage collector
I0127 14:13:34.058962 1 shared_informer.go:320] Caches are synced for resource quota
I0127 14:13:38.061546 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="19.139541ms"
I0127 14:13:38.069666 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="8.090329ms"
I0127 14:13:38.069714 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="32.94µs"
I0127 14:13:38.072875 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="28.3µs"
I0127 14:13:38.148333 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="22.935439ms"
I0127 14:13:38.157412 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="9.050437ms"
I0127 14:13:38.157466 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="35.571µs"
I0127 14:13:38.157630 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="51.901µs"
I0127 14:13:38.191594 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="15.906987ms"
I0127 14:13:38.198170 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="6.551399ms"
I0127 14:13:38.198217 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="32.73µs"
I0127 14:13:38.201324 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="31.02µs"
I0127 14:13:38.547532 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="20.936734ms"
I0127 14:13:38.563407 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="15.842977ms"
I0127 14:13:38.563464 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="37.35µs"
I0127 14:13:38.574866 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="28.739831ms"
I0127 14:13:38.574926 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="40.72µs"
I0127 14:13:38.586851 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="11.907668ms"
I0127 14:13:38.586888 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="20.86µs"
I0127 14:13:41.085506 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="57.31µs"
I0127 14:13:43.235134 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:13:44.094140 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="56.66µs"
I0127 14:13:46.103478 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="5.946914ms"
I0127 14:13:46.103556 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="53.401µs"
I0127 14:13:48.101253 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="55.351µs"
W0127 14:13:49.403958 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0127 14:13:49.408509 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0127 14:13:50.110309 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="4.713594ms"
I0127 14:13:50.110348 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-operator-proxy-webhook-64c9b98f89" duration="19.96µs"
W0127 14:13:50.334176 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0127 14:13:50.342891 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0127 14:13:52.099211 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="8.20694ms"
I0127 14:13:52.099274 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-controller-5dfff97774" duration="39.231µs"
I0127 14:13:55.105757 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="8.488403ms"
I0127 14:13:55.105822 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-events-controller-f58b9fbcd" duration="44.241µs"
I0127 14:13:59.114626 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="8.202371ms"
I0127 14:13:59.114699 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-webhook-65f6889cdf" duration="32.73µs"
I0127 14:14:03.437840 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:14:03.978193 1 shared_informer.go:313] Waiting for caches to sync for garbage collector
W0127 14:14:04.062406 1 shared_informer.go:597] resyncPeriod 15h39m45.733552794s is smaller than resyncCheckPeriod 23h33m53.473692162s and the informer has already started. Changing it to 23h33m53.473692162s
I0127 14:14:04.062444 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggers.triggers.tekton.dev"
I0127 14:14:04.062475 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="eventlisteners.triggers.tekton.dev"
W0127 14:14:04.062487 1 shared_informer.go:597] resyncPeriod 16h17m0.623772697s is smaller than resyncCheckPeriod 23h33m53.473692162s and the informer has already started. Changing it to 23h33m53.473692162s
I0127 14:14:04.062502 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggerbindings.triggers.tekton.dev"
W0127 14:14:04.062512 1 shared_informer.go:597] resyncPeriod 19h53m34.433851461s is smaller than resyncCheckPeriod 23h33m53.473692162s and the informer has already started. Changing it to 23h33m53.473692162s
I0127 14:14:04.062526 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="triggertemplates.triggers.tekton.dev"
W0127 14:14:04.062535 1 shared_informer.go:597] resyncPeriod 14h37m2.850913899s is smaller than resyncCheckPeriod 23h33m53.473692162s and the informer has already started. Changing it to 23h33m53.473692162s
I0127 14:14:04.062551 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="interceptors.triggers.tekton.dev"
I0127 14:14:04.062592 1 shared_informer.go:313] Waiting for caches to sync for resource quota
W0127 14:14:04.416505 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0127 14:14:04.416964 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0127 14:14:05.063277 1 shared_informer.go:320] Caches are synced for resource quota
I0127 14:14:05.078538 1 shared_informer.go:320] Caches are synced for garbage collector
W0127 14:14:05.345159 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0127 14:14:05.345619 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0127 14:14:05.619937 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="16.167619ms"
I0127 14:14:05.627770 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="7.803818ms"
I0127 14:14:05.627874 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="55.711µs"
I0127 14:14:05.631148 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="29.46µs"
I0127 14:14:05.718939 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="15.604745ms"
I0127 14:14:05.726592 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="7.626566ms"
I0127 14:14:05.726640 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="33.65µs"
I0127 14:14:05.729675 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="37.77µs"
I0127 14:14:05.781858 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="18.671498ms"
I0127 14:14:05.790310 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="8.424062ms"
I0127 14:14:05.790412 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="36.18µs"
I0127 14:14:05.792552 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="25.9µs"
I0127 14:14:09.150016 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="8.23184ms"
I0127 14:14:09.150101 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-controller-8444b7b678" duration="60.621µs"
I0127 14:14:12.162087 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="10.290106ms"
I0127 14:14:12.162197 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-webhook-f64478468" duration="44.59µs"
I0127 14:14:14.152863 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="72.61µs"
W0127 14:14:19.421338 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0127 14:14:19.421808 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
W0127 14:14:20.351167 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io)
E0127 14:14:20.351612 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError"
I0127 14:14:24.119128 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
I0127 14:14:25.160344 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="4.512564ms"
I0127 14:14:25.160404 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-triggers-core-interceptors-7cc7cd5547" duration="36.221µs"
I0127 14:14:26.889991 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="8.335782ms"
I0127 14:14:26.894333 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="4.305612ms"
I0127 14:14:26.894387 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="34.15µs"
I0127 14:14:26.898087 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="29.451µs"
I0127 14:14:31.186921 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="4.670505ms"
I0127 14:14:31.187004 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="59.19µs"
I0127 14:14:33.435802 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="15.820839ms"
I0127 14:14:33.443631 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="7.805768ms"
I0127 14:14:33.443685 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="36.431µs"
I0127 14:14:33.446941 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="37.89µs"
I0127 14:14:33.499468 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d"
duration="15.730148ms" I0127 14:14:33.507071 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="7.578467ms" I0127 14:14:33.507127 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="31.74µs" I0127 14:14:33.516056 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="25.42µs" I0127 14:14:33.635904 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="15.952899ms" I0127 14:14:33.643427 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="7.490656ms" I0127 14:14:33.643478 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="33.08µs" I0127 14:14:33.650196 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="37.7µs" I0127 14:14:34.354134 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:14:34.450278 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:14:34.450781 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:14:35.471433 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:14:35.471967 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:14:38.204795 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="84.64µs" I0127 14:14:41.214372 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" duration="5.913794ms" I0127 14:14:41.214425 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-retention-policy-agent-68c6b7cf5d" 
duration="28.98µs" I0127 14:14:43.217265 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="5.949715ms" I0127 14:14:43.217330 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-watcher-6c9b786b7" duration="42.94µs" W0127 14:14:49.455579 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:14:49.456420 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:14:50.476557 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:14:50.476972 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:03.753843 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="59.67µs" I0127 14:15:03.769064 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="8.10581ms" I0127 14:15:03.769112 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-results-api-546b75cb88" duration="30.421µs" W0127 14:15:04.466100 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:04.466649 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:04.962487 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:15:05.070411 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="extensions.dashboard.tekton.dev" I0127 14:15:05.070458 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0127 14:15:05.086823 
1 shared_informer.go:313] Waiting for caches to sync for garbage collector W0127 14:15:05.480406 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:05.480817 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:06.087716 1 shared_informer.go:320] Caches are synced for garbage collector I0127 14:15:06.170670 1 shared_informer.go:320] Caches are synced for resource quota I0127 14:15:07.986172 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="19.084993ms" I0127 14:15:07.992309 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="6.106726ms" I0127 14:15:07.992433 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="40.541µs" I0127 14:15:07.995570 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="28.56µs" I0127 14:15:11.271049 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="76.4µs" I0127 14:15:11.287125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="8.207742ms" I0127 14:15:11.287177 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="35.47µs" I0127 14:15:15.789405 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="15.043753ms" I0127 14:15:15.797556 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="8.12394ms" I0127 14:15:15.797686 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="45.11µs" I0127 14:15:15.800919 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="35.681µs" E0127 14:15:16.308189 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-edit\", UID:\"741e45c1-ee02-4c8c-bb77-795a1ef73a1e\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, 
writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-vwrb7\", UID:\"daee58b3-2e41-4c04-9031-4d2364cbf2cc\", Controller:(*bool)(0xc00347d800), BlockOwnerDeletion:(*bool)(0xc00347d801)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-edit\" not found" logger="UnhandledError" E0127 14:15:16.332716 1 garbagecollector.go:360] "Unhandled Error" err="error syncing item &garbagecollector.node{identity:garbagecollector.objectReference{OwnerReference:v1.OwnerReference{APIVersion:\"rbac.authorization.k8s.io/v1\", Kind:\"ClusterRole\", Name:\"tekton-dashboard-backend-view\", UID:\"cfe0f35e-da3d-49f6-973a-b58bd04fde34\", Controller:(*bool)(nil), BlockOwnerDeletion:(*bool)(nil)}, Namespace:\"\"}, dependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:1}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, dependents:map[*garbagecollector.node]struct {}{}, deletingDependents:false, deletingDependentsLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, beingDeleted:false, beingDeletedLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, virtual:false, virtualLock:sync.RWMutex{w:sync.Mutex{state:0, sema:0x0}, writerSem:0x0, readerSem:0x0, readerCount:atomic.Int32{_:atomic.noCopy{}, v:0}, readerWait:atomic.Int32{_:atomic.noCopy{}, v:0}}, owners:[]v1.OwnerReference{v1.OwnerReference{APIVersion:\"operator.tekton.dev/v1alpha1\", Kind:\"TektonInstallerSet\", Name:\"dashboard-main-static-vwrb7\", UID:\"daee58b3-2e41-4c04-9031-4d2364cbf2cc\", Controller:(*bool)(0xc002a7df60), BlockOwnerDeletion:(*bool)(0xc002a7df61)}}}: clusterroles.rbac.authorization.k8s.io \"tekton-dashboard-backend-view\" not found" logger="UnhandledError" E0127 14:15:17.056111 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:15:17.292537 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="8.313372ms" I0127 14:15:17.292650 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-7c567d6b77" duration="84.36µs" I0127 
14:15:17.322773 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="22.926352ms" I0127 14:15:17.328577 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="5.777063ms" I0127 14:15:17.328655 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="57.551µs" I0127 14:15:17.488993 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="3.551357ms" I0127 14:15:17.489053 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="34.17µs" I0127 14:15:17.616766 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="82.61µs" I0127 14:15:17.725769 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="8.275272ms" I0127 14:15:17.725826 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="35.26µs" I0127 14:15:17.855588 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="54.47µs" I0127 14:15:18.292200 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="58.51µs" I0127 14:15:18.299845 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-pipelines-remote-resolvers-dbc8f9b75" duration="42.081µs" I0127 14:15:18.306501 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="81.67µs" I0127 14:15:18.311407 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="34.56µs" I0127 14:15:18.323012 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-dashboard-56b8f9f7d4" duration="5.05µs" W0127 14:15:18.554688 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:15:18.555120 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:15:18.555480 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:15:18.555504 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server 
could not find the requested resource" logger="UnhandledError" I0127 14:15:18.977443 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="15.344595ms" I0127 14:15:18.983260 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="5.796143ms" I0127 14:15:18.983306 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="34.231µs" I0127 14:15:18.986340 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="29.46µs" W0127 14:15:19.470752 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:19.471289 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:20.163290 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-668fdd7d95" duration="18.754441ms" I0127 14:15:20.170607 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-668fdd7d95" duration="7.292884ms" I0127 14:15:20.170651 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-668fdd7d95" duration="27.831µs" I0127 14:15:20.176593 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-668fdd7d95" duration="26.84µs" I0127 14:15:20.308923 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="7.9246ms" I0127 14:15:20.308988 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-56b4589864" duration="44.1µs" I0127 14:15:20.332871 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="17.953044ms" I0127 14:15:20.341452 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="8.555554ms" I0127 14:15:20.341531 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="17.536222ms" I0127 14:15:20.341648 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="34.99µs" I0127 14:15:20.347050 1 replica_set.go:679] "Finished 
syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="5.503611ms" I0127 14:15:20.347108 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="38.48µs" I0127 14:15:20.350933 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="30.71µs" W0127 14:15:20.370240 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:15:20.370830 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:15:20.371233 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:15:20.371254 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:15:20.484561 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:20.485039 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:20.510386 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="67.43µs" I0127 14:15:20.519144 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-7686bbff79" duration="18.525809ms" I0127 14:15:20.531988 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-7686bbff79" duration="12.818826ms" I0127 14:15:20.532037 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-7686bbff79" duration="31.36µs" I0127 14:15:20.536211 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-7686bbff79" duration="29.9µs" I0127 14:15:21.299847 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" duration="55.01µs" I0127 14:15:21.307718 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="tekton-pipelines/tekton-chains-controller-67b58566c9" 
duration="57.241µs" I0127 14:15:23.349992 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="0s" I0127 14:15:23.357796 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:23.365798 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:23.365883 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:23.400005 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:25.289316 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="18.707298ms" I0127 14:15:25.296974 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="7.631931ms" I0127 14:15:25.297024 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="27.1µs" I0127 14:15:25.305629 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="20.901µs" I0127 14:15:25.321045 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-668fdd7d95" duration="8.012974ms" I0127 14:15:25.321114 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-controller-668fdd7d95" duration="44.45µs" W0127 14:15:25.458064 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:15:25.458666 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:15:25.459079 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:15:25.459101 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:15:27.126354 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="18.093714ms" I0127 14:15:27.143644 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="17.253917ms" I0127 14:15:27.143716 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="44.95µs" I0127 14:15:27.149843 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="16.52µs" I0127 14:15:27.320786 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" 
kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="69.561µs" I0127 14:15:30.343166 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-7686bbff79" duration="14.316784ms" I0127 14:15:30.343224 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-webhook-7686bbff79" duration="36.91µs" W0127 14:15:34.075491 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:15:34.076084 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:15:34.076513 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:15:34.076531 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:15:34.478448 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:34.478908 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:35.458317 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:15:35.488834 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:35.489390 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:36.093311 1 shared_informer.go:313] Waiting for caches to sync for garbage collector W0127 14:15:36.173565 1 shared_informer.go:597] resyncPeriod 14h30m40.157410577s is smaller than resyncCheckPeriod 23h33m53.473692162s and the informer has already started. 
Changing it to 23h33m53.473692162s I0127 14:15:36.173616 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="repositories.pipelinesascode.tekton.dev" I0127 14:15:36.173654 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0127 14:15:36.173672 1 shared_informer.go:320] Caches are synced for resource quota I0127 14:15:36.193580 1 shared_informer.go:320] Caches are synced for garbage collector I0127 14:15:40.631904 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="969.754546ms" I0127 14:15:40.631956 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="pipelines-as-code/pipelines-as-code-watcher-8684cbd465" duration="29.341µs" I0127 14:15:48.367502 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:49.374333 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0127 14:15:49.484491 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:49.484982 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:15:50.493624 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:15:50.494049 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:15:51.376062 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="40.9µs" I0127 14:15:52.379917 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:53.386627 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:15:53.389656 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="6.925525ms" I0127 14:15:53.389716 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kind-registry/registry-68dcdc78fb" duration="19.14µs" I0127 14:15:53.392795 1 job_controller.go:598] "enqueueing job" logger="job-controller" 
key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" W0127 14:15:53.409392 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:15:53.409819 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:15:53.410204 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:15:53.410223 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:15:54.034920 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="19.044842ms" I0127 14:15:54.042492 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="7.54507ms" I0127 14:15:54.042531 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="22.48µs" I0127 14:15:54.056577 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="25.67µs" I0127 14:15:54.393477 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:01.018266 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="9.379895ms" I0127 14:16:01.029485 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="11.190669ms" I0127 14:16:01.037462 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="7.946343ms" I0127 14:16:01.037518 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="31.31µs" I0127 14:16:01.077667 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-857db5d775" duration="8.204225ms" I0127 14:16:01.081619 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-857db5d775" duration="3.926921ms" I0127 14:16:01.081682 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-857db5d775" duration="36.41µs" I0127 14:16:01.084014 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-857db5d775" duration="35.01µs" I0127 14:16:01.145838 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-7b74646946" 
duration="8.472947ms" I0127 14:16:01.145886 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-cleanup-controller-7b74646946" duration="26.15µs" I0127 14:16:01.206531 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-86d8747f78" duration="5.635795ms" I0127 14:16:01.206586 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-reports-controller-86d8747f78" duration="34.03µs" I0127 14:16:02.402045 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="16.024987ms" I0127 14:16:02.402125 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="dex/dex-57d7466bcf" duration="23.12µs" I0127 14:16:02.417617 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="5.764536ms" I0127 14:16:02.417703 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="49.731µs" W0127 14:16:04.492303 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:04.492721 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:16:05.417846 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="72.431µs" W0127 14:16:05.498510 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:05.499052 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:16:06.009957 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:16:06.177487 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespaceddeletingpolicies.policies.kyverno.io" I0127 14:16:06.177514 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authcodes.dex.coreos.com" I0127 14:16:06.177526 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="updaterequests.kyverno.io" I0127 
14:16:06.177540 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="authrequests.dex.coreos.com" I0127 14:16:06.177555 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="ephemeralreports.reports.kyverno.io" I0127 14:16:06.177567 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicetokens.dex.coreos.com" I0127 14:16:06.177579 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="signingkeies.dex.coreos.com" I0127 14:16:06.177591 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="passwords.dex.coreos.com" I0127 14:16:06.177602 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedimagevalidatingpolicies.policies.kyverno.io" I0127 14:16:06.177616 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="namespacedvalidatingpolicies.policies.kyverno.io" I0127 14:16:06.177627 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyreports.wgpolicyk8s.io" I0127 14:16:06.177638 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="offlinesessionses.dex.coreos.com" I0127 14:16:06.177651 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="oauth2clients.dex.coreos.com" I0127 14:16:06.177660 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="refreshtokens.dex.coreos.com" I0127 14:16:06.177670 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.kyverno.io" I0127 14:16:06.177684 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="devicerequests.dex.coreos.com" I0127 14:16:06.177706 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policyexceptions.policies.kyverno.io" I0127 14:16:06.177716 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="connectors.dex.coreos.com" I0127 14:16:06.177728 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="policies.kyverno.io" I0127 14:16:06.177752 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="cleanuppolicies.kyverno.io" I0127 14:16:06.177884 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0127 14:16:06.200481 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0127 14:16:06.508437 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="58.731µs" I0127 14:16:07.378233 1 shared_informer.go:320] Caches are synced for resource quota I0127 14:16:07.423501 1 job_controller.go:598] "enqueueing job" 
logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:07.434788 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-857db5d775" duration="4.176513ms" I0127 14:16:07.434872 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-background-controller-857db5d775" duration="44.73µs" I0127 14:16:07.500655 1 shared_informer.go:320] Caches are synced for garbage collector I0127 14:16:08.428087 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:08.429157 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:08.564191 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:09.432379 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:09.434227 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="101.82µs" I0127 14:16:09.436285 1 job_controller.go:598] "enqueueing job" logger="job-controller" key="tekton-pipelines/tekton-chains-signing-secret" delay="1s" I0127 14:16:16.052668 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:16:19.338383 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="67.93µs" I0127 14:16:19.359933 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="13.155154ms" I0127 14:16:19.359984 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="kyverno/kyverno-admission-controller-6476965c9b" duration="31.22µs" W0127 14:16:19.498364 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:19.498807 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:16:20.503011 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:20.503419 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could 
not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:16:21.073120 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:16:21.073891 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:16:21.074285 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:16:21.074303 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:16:34.506440 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:34.506896 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:16:35.506999 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:35.507461 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:16:36.310847 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:16:37.382199 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="promotionruns.appstudio.redhat.com" I0127 14:16:37.382220 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargetclaims.appstudio.redhat.com" I0127 14:16:37.382241 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="environments.appstudio.redhat.com" I0127 14:16:37.382252 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="enterprisecontractpolicies.appstudio.redhat.com" I0127 14:16:37.382276 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" 
resource="snapshots.appstudio.redhat.com" I0127 14:16:37.382299 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="deploymenttargets.appstudio.redhat.com" I0127 14:16:37.382319 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="components.appstudio.redhat.com" I0127 14:16:37.382332 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="snapshotenvironmentbindings.appstudio.redhat.com" I0127 14:16:37.382344 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentdetectionqueries.appstudio.redhat.com" I0127 14:16:37.382357 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="applications.appstudio.redhat.com" I0127 14:16:37.382453 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0127 14:16:37.509460 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0127 14:16:38.483308 1 shared_informer.go:320] Caches are synced for resource quota I0127 14:16:38.609643 1 shared_informer.go:320] Caches are synced for garbage collector I0127 14:16:45.929362 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-68b74948f" duration="25.294233ms" I0127 14:16:45.942652 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-68b74948f" duration="13.257431ms" I0127 14:16:45.942700 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-68b74948f" duration="28.95µs" W0127 14:16:49.510732 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:49.511367 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:16:50.512181 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:16:50.512613 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:16:50.520514 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="release-service/release-service-controller-manager-68b74948f" duration="50.42µs" I0127 14:16:56.918933 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:16:57.721706 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="18.581143ms" I0127 14:16:57.727966 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="6.225847ms" I0127 14:16:57.728006 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="21.73µs" I0127 14:16:57.738513 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="27.79µs" W0127 14:16:59.489299 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:16:59.489786 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:16:59.490199 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:16:59.490223 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:17:01.529796 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-68b74948f" duration="5.181739ms" I0127 14:17:01.529852 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="release-service/release-service-controller-manager-68b74948f" duration="27.861µs" I0127 14:17:02.551163 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="47.23µs" W0127 14:17:04.519115 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:04.519542 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:17:05.515448 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:05.515912 1 horizontal.go:275] 
"Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:17:07.045390 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:17:07.939229 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="19.062305ms" I0127 14:17:07.947140 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="7.88324ms" I0127 14:17:07.947208 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="41.681µs" I0127 14:17:07.955910 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="18.92µs" I0127 14:17:08.487264 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="componentgroups.appstudio.redhat.com" I0127 14:17:08.487290 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplanadmissions.appstudio.redhat.com" I0127 14:17:08.487322 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalrequests.appstudio.redhat.com" I0127 14:17:08.487334 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseserviceconfigs.appstudio.redhat.com" I0127 14:17:08.487346 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releaseplans.appstudio.redhat.com" I0127 14:17:08.487372 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="integrationtestscenarios.appstudio.redhat.com" I0127 14:17:08.487383 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="internalservicesconfigs.appstudio.redhat.com" I0127 14:17:08.487395 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="releases.appstudio.redhat.com" I0127 14:17:08.487489 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0127 14:17:08.615923 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0127 14:17:10.393567 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="18.081509ms" I0127 14:17:10.404309 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="10.718302ms" I0127 14:17:10.404358 1 replica_set.go:679] "Finished syncing" 
logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="28.281µs" I0127 14:17:10.404393 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="13.05µs" I0127 14:17:10.404449 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="24.27µs" I0127 14:17:10.587768 1 shared_informer.go:320] Caches are synced for resource quota I0127 14:17:10.616050 1 shared_informer.go:320] Caches are synced for garbage collector I0127 14:17:13.598189 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="4.604845ms" I0127 14:17:13.598235 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="build-service/build-service-controller-manager-85854c4cdc" duration="27.85µs" I0127 14:17:13.857228 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="15.356308ms" I0127 14:17:13.870992 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="13.733955ms" I0127 14:17:13.871057 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="35.36µs" I0127 14:17:14.579167 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="41.37µs" I0127 14:17:16.586207 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="45.241µs" I0127 14:17:17.331504 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:17:17.601659 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="8.403414ms" I0127 14:17:17.601705 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="namespace-lister/namespace-lister-584d4574c4" duration="24.871µs" W0127 14:17:19.524214 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:19.524637 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:17:20.520643 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:20.521141 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:17:24.613328 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="51.791µs" I0127 14:17:25.592495 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="7.675149ms" I0127 14:17:25.592549 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="integration-service/integration-service-controller-manager-6ddd78bf44" duration="32.45µs" I0127 14:17:25.616631 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="45.31µs" I0127 14:17:27.445950 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:17:31.632150 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="49.66µs" I0127 14:17:32.635787 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="82.131µs" W0127 14:17:34.536467 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:34.537048 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:17:35.524930 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:35.525394 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:17:37.320566 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:17:37.321071 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:17:37.321500 1 
reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:17:37.321524 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:17:37.674595 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:17:41.660858 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="58.521µs" W0127 14:17:49.541060 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:49.541502 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:17:50.529339 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:17:50.529804 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:17:58.129694 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:18:04.549668 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:04.550145 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:18:05.533966 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:05.534392 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource 
metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:18:08.489975 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:18:12.674217 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="7.66165ms" I0127 14:18:12.674293 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="40.6µs" W0127 14:18:19.554343 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:19.554839 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:18:20.538815 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:20.539284 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:18:21.546059 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:18:21.546516 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:18:21.546945 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:18:21.546966 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:18:34.561893 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:34.562327 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid 
out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:18:35.543577 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:35.544053 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:18:42.681903 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="7.485409ms" I0127 14:18:42.682034 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="konflux-ui/proxy-5bd49fd665" duration="30.73µs" W0127 14:18:49.566551 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:49.567047 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:18:50.547853 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:18:50.548309 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:18:51.650147 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7cd8c97687" duration="20.572433ms" I0127 14:18:51.656108 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7cd8c97687" duration="5.931066ms" I0127 14:18:51.656161 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7cd8c97687" duration="35.57µs" I0127 14:18:51.659202 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
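
The horizontal.go errors above come from the HPA controller trying to read CPU usage for the tekton-pipelines-webhook and tekton-operator-proxy-webhook Deployments through the resource metrics API, which this cluster does not serve. As a hedged illustration (not the controller's own code), the sketch below issues the same kind of read with the generated k8s.io/metrics client; the namespace is taken from the log, everything else is assumed.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	mc, err := metricsclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Same read path the HPA controller depends on: pod metrics in the tekton-pipelines namespace.
	// On this cluster it fails with "the server could not find the requested resource".
	podMetrics, err := mc.MetricsV1beta1().PodMetricses("tekton-pipelines").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		fmt.Println("resource metrics API unavailable:", err)
		return
	}
	for _, pm := range podMetrics.Items {
		for _, c := range pm.Containers {
			fmt.Printf("%s/%s cpu=%s\n", pm.Name, c.Name, c.Usage.Cpu().String())
		}
	}
}

Until something implements metrics.k8s.io (metrics-server is the usual choice, and on a kind cluster it typically needs its kubelet TLS verification relaxed), both this read and the HPA evaluations above will keep failing on every scaling interval, which is why the same error pair recurs roughly every fifteen seconds.
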
key="image-controller/image-controller-controller-manager-7cd8c97687" duration="17.04µs" I0127 14:19:00.835869 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7cd8c97687" duration="66.72µs" W0127 14:19:04.574205 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:04.574652 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:19:05.553182 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:05.553620 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:19:09.550417 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:19:09.725470 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:19:09.725946 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:19:09.726391 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:19:09.726410 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:19:10.603822 1 resource_quota_monitor.go:227] "QuotaMonitor created object count evaluator" logger="resourcequota-controller" resource="imagerepositories.appstudio.redhat.com" I0127 14:19:10.603878 1 shared_informer.go:313] Waiting for caches to sync for resource quota I0127 14:19:10.634022 1 shared_informer.go:313] Waiting for caches to sync for garbage collector I0127 14:19:10.634069 1 shared_informer.go:320] Caches are synced for garbage collector I0127 14:19:10.704532 1 shared_informer.go:320] Caches are synced for resource quota I0127 14:19:11.847164 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" 
key="image-controller/image-controller-controller-manager-7cd8c97687" duration="7.93802ms" I0127 14:19:11.847297 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="image-controller/image-controller-controller-manager-7cd8c97687" duration="31.5µs" I0127 14:19:13.156074 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6fc4d499c5" duration="18.19549ms" I0127 14:19:13.163463 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6fc4d499c5" duration="7.361642ms" I0127 14:19:13.163518 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6fc4d499c5" duration="34.141µs" I0127 14:19:13.169793 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6fc4d499c5" duration="29.63µs" I0127 14:19:14.874966 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6fc4d499c5" duration="6.119103ms" I0127 14:19:14.875010 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-6fc4d499c5" duration="20.6µs" I0127 14:19:14.896805 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="15.709822ms" I0127 14:19:14.904901 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="8.063057ms" I0127 14:19:14.904955 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="34.01µs" I0127 14:19:15.298415 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="46.731µs" I0127 14:19:15.873804 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="47.42µs" I0127 14:19:15.883385 1 replica_set.go:679] "Finished syncing" logger="replicaset-controller" kind="ReplicaSet" key="smee-client/gosmee-client-844f4bd847" duration="34.85µs" W0127 14:19:19.578844 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:19.579315 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:19:19.589174 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:19:20.558028 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:20.558691 1 horizontal.go:275] "Unhandled Error" err="failed to compute 
desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:19:34.585584 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:34.586021 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:19:35.562769 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:35.563207 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:19:40.120167 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:19:40.228932 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:19:40.229364 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:19:40.229765 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:19:40.229787 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:19:49.594449 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:49.594935 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource 
cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:19:50.165588 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:19:50.566706 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:19:50.567154 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:04.602233 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:04.602696 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:05.571320 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:05.572187 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:19.605778 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:19.606241 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:20.576503 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:20.576972 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for 
Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:27.356269 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:20:27.356729 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:20:27.357187 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:20:27.357206 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:20:31.187562 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:20:34.612752 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:34.613191 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:35.580009 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:35.580449 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:20:41.563812 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:20:49.616902 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:49.617332 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric 
value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:20:50.584389 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:20:50.584843 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:20:51.642280 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:21:01.923908 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:21:04.624710 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:04.625118 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:21:05.589250 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:05.589694 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:21:05.719961 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:21:05.720410 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:21:05.720831 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:21:05.720850 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list 
*v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:21:19.629417 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:19.629900 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:21:20.594203 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:20.594788 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:21:22.187682 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" I0127 14:21:32.492773 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:21:34.637885 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:34.638424 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:21:35.599050 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:35.599455 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:21:49.359885 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:21:49.360285 1 metadata.go:231] 
"The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:21:49.360664 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:21:49.360683 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:21:49.643325 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:49.643837 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:21:50.603331 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:21:50.603774 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:04.651594 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:04.652087 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:05.608034 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:05.608518 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get 
pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:19.656210 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:19.656687 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:20.611926 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:20.612388 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:34.663535 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:34.663996 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:35.616097 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:35.616857 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:41.465927 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:22:41.466342 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:22:41.466790 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed 
to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:22:41.466811 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:22:49.668826 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:49.669338 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:22:50.621290 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:22:50.621756 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:23:04.391559 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:23:04.676188 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:04.676625 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:23:05.626431 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:05.626983 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:23:19.680991 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the 
server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:19.681605 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:23:20.630708 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:20.631141 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:23:24.756877 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:23:28.240357 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:23:28.240811 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:23:28.241622 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:23:28.241642 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:23:34.689362 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:34.689860 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:23:35.634657 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:35.635066 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid 
metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:23:49.694555 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:49.695047 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:23:50.639108 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:23:50.639516 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:00.834899 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:24:00.835317 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:24:00.835773 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:24:00.835792 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:24:04.702278 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:04.702705 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:24:05.368789 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" 
key="kind-mapt-control-plane" W0127 14:24:05.644059 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:05.644526 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:24:15.562942 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:24:19.707068 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:19.707535 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:20.648075 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:20.648522 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:34.715504 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:34.715943 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:35.652657 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:35.653120 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu 
resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:49.721375 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:49.721829 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:50.656867 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:24:50.657278 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:24:56.393647 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:24:56.394152 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:24:56.394533 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:24:56.394553 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:25:04.728654 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:04.729097 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:25:05.661632 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not 
find the requested resource (get pods.metrics.k8s.io) E0127 14:25:05.662257 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:25:19.733256 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:19.733700 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:25:20.665900 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:20.666312 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:25:27.458568 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:25:34.740925 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:34.741363 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:25:35.670394 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:35.670879 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested 
resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:25:37.833783 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:25:49.745436 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:49.745884 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:25:50.674999 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:25:50.675403 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:25:54.075599 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:25:54.076108 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:25:54.076572 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:25:54.076593 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" W0127 14:26:04.753797 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:26:04.754349 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:26:05.678820 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) 
E0127 14:26:05.679241 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:26:19.758524 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:26:19.759039 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:26:20.683963 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:26:20.684456 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" I0127 14:26:28.466658 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane" W0127 14:26:34.766153 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:26:34.766575 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-pipelines-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" W0127 14:26:35.687963 1 type.go:183] The watchlist request for pods ended with an error, falling back to the standard LIST semantics, err = the server could not find the requested resource (get pods.metrics.k8s.io) E0127 14:26:35.688368 1 horizontal.go:275] "Unhandled Error" err="failed to compute desired number of replicas based on listed metrics for Deployment/tekton-pipelines/tekton-operator-proxy-webhook: invalid metrics (1 invalid out of 1), first error is: failed to get cpu resource metric value: failed to get cpu utilization: unable to get metrics for resource cpu: unable to fetch metrics from resource metrics API: the server could not find the requested resource (get pods.metrics.k8s.io)" logger="UnhandledError" 
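Note: the repeating type.go:183 / horizontal.go:275 pair above is the horizontal-pod-autoscaler controller re-evaluating the HPAs that target Deployment/tekton-pipelines/tekton-pipelines-webhook and Deployment/tekton-pipelines/tekton-operator-proxy-webhook roughly every 15 seconds; each attempt gets a 404 because the resource metrics API (metrics.k8s.io) is not served on this cluster, which on a kind node usually means metrics-server is not installed. A minimal discovery probe, sketched under the assumptions that k8s.io/client-go is available and the kubeconfig sits at the default ~/.kube/config path (the file layout and output format below are illustrative, not taken from this log; v1beta1 is the version metrics-server normally registers):

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative default; point this at the kubeconfig for the cluster emitting these logs.
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")

	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// The HPA errors above are 404s against this group/version, so ask the
	// aggregated API whether it is actually served.
	for _, gv := range []string{"metrics.k8s.io/v1beta1"} {
		if _, err := dc.ServerResourcesForGroupVersion(gv); err != nil {
			fmt.Printf("%s: not served (%v)\n", gv, err)
		} else {
			fmt.Printf("%s: served\n", gv)
		}
	}
}

If the probe reports metrics.k8s.io/v1beta1 as not served, installing metrics-server (or deleting the two HPAs) should stop this error pair; the controller itself keeps running either way.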
W0127 14:26:37.511442 1 reflector.go:362] The watchlist request ended with an error, falling back to the standard LIST/WATCH semantics because making progress is better than deadlocking, err = the server could not find the requested resource E0127 14:26:37.511903 1 metadata.go:231] "The watchlist request ended with an error, falling back to the standard LIST semantics" err="the server could not find the requested resource" resource="dashboard.tekton.dev/v1alpha1, Resource=extensions" W0127 14:26:37.512325 1 reflector.go:569] k8s.io/client-go/metadata/metadatainformer/informer.go:138: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource E0127 14:26:37.512346 1 reflector.go:166] "Unhandled Error" err="k8s.io/client-go/metadata/metadatainformer/informer.go:138: Failed to watch *v1.PartialObjectMetadata: failed to list *v1.PartialObjectMetadata: the server could not find the requested resource" logger="UnhandledError" I0127 14:26:38.539343 1 range_allocator.go:247] "Successfully synced" logger="node-ipam-controller" key="kind-mapt-control-plane"
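Note: the less frequent reflector.go:362 / metadata.go:231 / reflector.go:569 / reflector.go:166 group, recurring every 30-60 seconds in this capture, most likely comes from the controller manager's metadata informers (shared by the garbage collector and quota controllers) for dashboard.tekton.dev/v1alpha1, Resource=extensions: discovery still advertises that group/version, but LIST and WATCH return "the server could not find the requested resource", which typically means the corresponding CustomResourceDefinition (in a stock Tekton Dashboard install, the Extension CRD extensions.dashboard.tekton.dev) is missing or was removed after discovery cached it. Adding "dashboard.tekton.dev/v1alpha1" to the group/version slice in the probe above would confirm that from the client side. Neither error family indicates a controller failure; both simply repeat until the missing APIs are installed or the objects that reference them are removed.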