{
    "apiVersion": "v1",
    "items": [
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:22:25Z",
                "name": "kube-root-ca.crt",
                "namespace": "cert-manager",
                "resourceVersion": "583",
                "uid": "d7cb2481-5ec3-4102-b4e5-3fc7756f5e96"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:21:33Z",
                "name": "kube-root-ca.crt",
                "namespace": "default",
                "resourceVersion": "362",
                "uid": "da6e207b-fa98-48c4-b863-9a902bbf6467"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "prune_images.py": "import argparse\nimport itertools\nimport json\nimport logging\nimport os\nimport re\nimport time\n\nfrom collections.abc import Iterator\nfrom http.client import HTTPResponse\nfrom typing import Any, Dict, List\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nlogging.basicConfig(\n    format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.INFO\n)\nLOGGER = logging.getLogger(__name__)\nQUAY_API_URL = \"https://quay.io/api/v1\"\n\nprocessed_repos_counter = itertools.count()\n\n\nImageRepo = Dict[str, Any]\n\n\ndef get_quay_tags(quay_token: str, namespace: str, name: str) -\u003e Dict[str, Any]:\n    next_page = None\n    resp: HTTPResponse\n\n    all_tags = {}\n    while True:\n        query_args = {\"limit\": 100, \"onlyActiveTags\": True}\n        if next_page is not None:\n            query_args[\"page\"] = next_page\n\n        api_url = f\"{QUAY_API_URL}/repository/{namespace}/{name}/tag/?{urlencode(query_args)}\"\n        request = Request(api_url, headers={\n            \"Authorization\": f\"Bearer {quay_token}\",\n        })\n\n        try:\n            with urlopen(request) as resp:\n                if resp.status != 200:\n                    raise RuntimeError(resp.reason)\n                json_data = json.loads(resp.read())\n        except HTTPError as ex:\n            if ex.status == 404:\n                LOGGER.info(\"Repository doesn't exist anymore %s/%s\", namespace, name)\n                return {}\n\n            if ex.status == 502 or ex.status == 504:\n                LOGGER.info(\"Gateway error, will retry\")\n                time.sleep(1)\n                continue\n            raise\n        except json.JSONDecodeError:\n            LOGGER.info(\"Json decoder error, will retry\")\n            continue\n\n        tags = json_data.get(\"tags\", [])\n        # store only name \u0026 manifest_digest keys, as others aren't used and take memory\n        all_tags.update({tag[\"name\"]: tag[\"manifest_digest\"] for tag in tags})\n\n        if not tags:\n            LOGGER.debug(\"No tags found.\")\n            break\n\n        page = json_data.get(\"page\", None)\n        additional = json_data.get(\"has_additional\", False)\n\n        if additional:\n            next_page = page + 1\n        else:\n            break\n\n    return all_tags\n\n\ndef delete_image_tag(quay_token: str, namespace: str, name: str, tag: str) -\u003e None:\n    api_url = f\"{QUAY_API_URL}/repository/{namespace}/{name}/tag/{tag}\"\n    request = Request(api_url, method=\"DELETE\", headers={\n        \"Authorization\": f\"Bearer {quay_token}\",\n    })\n    resp: HTTPResponse\n    try:\n        with urlopen(request) as resp:\n            if resp.status != 200 and resp.status != 204:\n                raise RuntimeError(resp.reason)\n\n    except HTTPError as ex:\n        # ignore if not found\n        if ex.status != 404:\n            raise(ex)\n\n\ndef manifest_exists(quay_token: str, namespace: str, name: str, manifest: str) -\u003e bool:\n    api_url = f\"{QUAY_API_URL}/repository/{namespace}/{name}/manifest/{manifest}\"\n    request = Request(api_url, headers={\n        \"Authorization\": f\"Bearer {quay_token}\",\n    })\n    resp: HTTPResponse\n    manifest_exists = True\n    try:\n        with urlopen(request) as resp:\n            if resp.status != 200 and resp.status != 204:\n                raise RuntimeError(resp.reason)\n\n    except HTTPError as ex:\n        if ex.status != 404:\n            raise(ex)\n        else:\n            manifest_exists = False\n\n    return manifest_exists\n\n\ndef remove_tags(tags_map: Dict[str, Any], quay_token: str, namespace: str, name: str, dry_run: bool = False) -\u003e None:\n    image_digests = set(tags_map.values())\n    # sha without any extension is clair report\n    tag_regex = re.compile(r\"^sha256-([0-9a-f]+)(\\.sbom|\\.att|\\.src|\\.sig|\\.dockerfile)?$\")\n    manifests_checked = {}\n    for tag_name in tags_map:\n        # attestation or sbom image\n        if (match := tag_regex.match(tag_name)) is not None:\n            if f\"sha256:{match.group(1)}\" not in image_digests:\n                # verify that manifest really doesn't exist, because if tag was removed, it won't be in tag list, but may still be in the registry\n                manifest_existence = manifests_checked.get(f\"sha256:{match.group(1)}\")\n                if manifest_existence is None:\n                    manifest_existence = manifest_exists(quay_token, namespace, name, f\"sha256:{match.group(1)}\")\n                    manifests_checked[f\"sha256:{match.group(1)}\"] = manifest_existence\n\n                if not manifest_existence:\n                    if dry_run:\n                        LOGGER.info(\"Tag %s from %s/%s should be removed\", tag_name, namespace, name)\n                    else:\n                        LOGGER.info(\"Removing tag %s from %s/%s\", tag_name, namespace, name)\n                        delete_image_tag(quay_token, namespace, name, tag_name)\n\n        elif tag_name.endswith(\".src\"):\n            to_delete = False\n\n            binary_tag = tag_name.removesuffix(\".src\")\n            if binary_tag not in tags_map:\n                to_delete = True\n            else:\n                manifest_digest = tags_map[binary_tag]\n                new_src_tag = f\"{manifest_digest.replace(':', '-')}.src\"\n                to_delete = new_src_tag in tags_map\n\n            if to_delete:\n                LOGGER.info(\"Removing deprecated tag %s\", tag_name)\n                delete_image_tag(quay_token, namespace, name, tag_name)\n        else:\n            LOGGER.debug(\"%s is not in a known type to be deleted.\", tag_name)\n\n\ndef process_repositories(repos: List[ImageRepo], quay_token: str, dry_run: bool = False) -\u003e None:\n    for repo in repos:\n        namespace = repo[\"namespace\"]\n        name = repo[\"name\"]\n        # skip huge repository for which we can't get all tags\n        if name == \"ocp-art-tenant/art-images\":\n            continue\n        LOGGER.info(\"Processing repository %s: %s/%s\", next(processed_repos_counter), namespace, name)\n\n        all_tags = get_quay_tags(quay_token, namespace, name)\n\n        if not all_tags:\n            continue\n\n        remove_tags(all_tags, quay_token, namespace, name, dry_run=dry_run)\n\n\ndef fetch_image_repos(access_token: str, namespace: str) -\u003e Iterator[List[ImageRepo]]:\n    next_page = None\n    resp: HTTPResponse\n    while True:\n        query_args = {\"namespace\": namespace}\n        if next_page is not None:\n            query_args[\"next_page\"] = next_page\n\n        api_url = f\"{QUAY_API_URL}/repository?{urlencode(query_args)}\"\n        request = Request(api_url, headers={\n            \"Authorization\": f\"Bearer {access_token}\",\n        })\n\n        try:\n            with urlopen(request) as resp:\n                if resp.status != 200:\n                    raise RuntimeError(resp.reason)\n                json_data = json.loads(resp.read())\n        except json.JSONDecodeError:\n            LOGGER.info(\"Json decoder error, will retry\")\n            continue\n\n        repos = json_data.get(\"repositories\", [])\n        if not repos:\n            LOGGER.debug(\"No image repository is found.\")\n            break\n\n        yield repos\n\n        if (next_page := json_data.get(\"next_page\", None)) is None:\n            break\n\n\ndef main():\n    token = os.getenv(\"QUAY_TOKEN\")\n    if not token:\n        raise ValueError(\"The token required for access to Quay API is missing!\")\n\n    args = parse_args()\n\n    for image_repos in fetch_image_repos(token, args.namespace):\n        process_repositories(image_repos, token, dry_run=args.dry_run)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--namespace\", required=True)\n    parser.add_argument(\"--dry-run\", action=\"store_true\")\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    main()\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"prune_images.py\":\"import argparse\\nimport itertools\\nimport json\\nimport logging\\nimport os\\nimport re\\nimport time\\n\\nfrom collections.abc import Iterator\\nfrom http.client import HTTPResponse\\nfrom typing import Any, Dict, List\\nfrom urllib.error import HTTPError\\nfrom urllib.parse import urlencode\\nfrom urllib.request import Request, urlopen\\n\\nlogging.basicConfig(\\n    format=\\\"%(asctime)s - %(levelname)s - %(message)s\\\", level=logging.INFO\\n)\\nLOGGER = logging.getLogger(__name__)\\nQUAY_API_URL = \\\"https://quay.io/api/v1\\\"\\n\\nprocessed_repos_counter = itertools.count()\\n\\n\\nImageRepo = Dict[str, Any]\\n\\n\\ndef get_quay_tags(quay_token: str, namespace: str, name: str) -\\u003e Dict[str, Any]:\\n    next_page = None\\n    resp: HTTPResponse\\n\\n    all_tags = {}\\n    while True:\\n        query_args = {\\\"limit\\\": 100, \\\"onlyActiveTags\\\": True}\\n        if next_page is not None:\\n            query_args[\\\"page\\\"] = next_page\\n\\n        api_url = f\\\"{QUAY_API_URL}/repository/{namespace}/{name}/tag/?{urlencode(query_args)}\\\"\\n        request = Request(api_url, headers={\\n            \\\"Authorization\\\": f\\\"Bearer {quay_token}\\\",\\n        })\\n\\n        try:\\n            with urlopen(request) as resp:\\n                if resp.status != 200:\\n                    raise RuntimeError(resp.reason)\\n                json_data = json.loads(resp.read())\\n        except HTTPError as ex:\\n            if ex.status == 404:\\n                LOGGER.info(\\\"Repository doesn't exist anymore %s/%s\\\", namespace, name)\\n                return {}\\n\\n            if ex.status == 502 or ex.status == 504:\\n                LOGGER.info(\\\"Gateway error, will retry\\\")\\n                time.sleep(1)\\n                continue\\n            raise\\n        except json.JSONDecodeError:\\n            LOGGER.info(\\\"Json decoder error, will retry\\\")\\n            continue\\n\\n        tags = json_data.get(\\\"tags\\\", [])\\n        # store only name \\u0026 manifest_digest keys, as others aren't used and take memory\\n        all_tags.update({tag[\\\"name\\\"]: tag[\\\"manifest_digest\\\"] for tag in tags})\\n\\n        if not tags:\\n            LOGGER.debug(\\\"No tags found.\\\")\\n            break\\n\\n        page = json_data.get(\\\"page\\\", None)\\n        additional = json_data.get(\\\"has_additional\\\", False)\\n\\n        if additional:\\n            next_page = page + 1\\n        else:\\n            break\\n\\n    return all_tags\\n\\n\\ndef delete_image_tag(quay_token: str, namespace: str, name: str, tag: str) -\\u003e None:\\n    api_url = f\\\"{QUAY_API_URL}/repository/{namespace}/{name}/tag/{tag}\\\"\\n    request = Request(api_url, method=\\\"DELETE\\\", headers={\\n        \\\"Authorization\\\": f\\\"Bearer {quay_token}\\\",\\n    })\\n    resp: HTTPResponse\\n    try:\\n        with urlopen(request) as resp:\\n            if resp.status != 200 and resp.status != 204:\\n                raise RuntimeError(resp.reason)\\n\\n    except HTTPError as ex:\\n        # ignore if not found\\n        if ex.status != 404:\\n            raise(ex)\\n\\n\\ndef manifest_exists(quay_token: str, namespace: str, name: str, manifest: str) -\\u003e bool:\\n    api_url = f\\\"{QUAY_API_URL}/repository/{namespace}/{name}/manifest/{manifest}\\\"\\n    request = Request(api_url, headers={\\n        \\\"Authorization\\\": f\\\"Bearer {quay_token}\\\",\\n    })\\n    resp: HTTPResponse\\n    manifest_exists = True\\n    try:\\n        with urlopen(request) as resp:\\n            if resp.status != 200 and resp.status != 204:\\n                raise RuntimeError(resp.reason)\\n\\n    except HTTPError as ex:\\n        if ex.status != 404:\\n            raise(ex)\\n        else:\\n            manifest_exists = False\\n\\n    return manifest_exists\\n\\n\\ndef remove_tags(tags_map: Dict[str, Any], quay_token: str, namespace: str, name: str, dry_run: bool = False) -\\u003e None:\\n    image_digests = set(tags_map.values())\\n    # sha without any extension is clair report\\n    tag_regex = re.compile(r\\\"^sha256-([0-9a-f]+)(\\\\.sbom|\\\\.att|\\\\.src|\\\\.sig|\\\\.dockerfile)?$\\\")\\n    manifests_checked = {}\\n    for tag_name in tags_map:\\n        # attestation or sbom image\\n        if (match := tag_regex.match(tag_name)) is not None:\\n            if f\\\"sha256:{match.group(1)}\\\" not in image_digests:\\n                # verify that manifest really doesn't exist, because if tag was removed, it won't be in tag list, but may still be in the registry\\n                manifest_existence = manifests_checked.get(f\\\"sha256:{match.group(1)}\\\")\\n                if manifest_existence is None:\\n                    manifest_existence = manifest_exists(quay_token, namespace, name, f\\\"sha256:{match.group(1)}\\\")\\n                    manifests_checked[f\\\"sha256:{match.group(1)}\\\"] = manifest_existence\\n\\n                if not manifest_existence:\\n                    if dry_run:\\n                        LOGGER.info(\\\"Tag %s from %s/%s should be removed\\\", tag_name, namespace, name)\\n                    else:\\n                        LOGGER.info(\\\"Removing tag %s from %s/%s\\\", tag_name, namespace, name)\\n                        delete_image_tag(quay_token, namespace, name, tag_name)\\n\\n        elif tag_name.endswith(\\\".src\\\"):\\n            to_delete = False\\n\\n            binary_tag = tag_name.removesuffix(\\\".src\\\")\\n            if binary_tag not in tags_map:\\n                to_delete = True\\n            else:\\n                manifest_digest = tags_map[binary_tag]\\n                new_src_tag = f\\\"{manifest_digest.replace(':', '-')}.src\\\"\\n                to_delete = new_src_tag in tags_map\\n\\n            if to_delete:\\n                LOGGER.info(\\\"Removing deprecated tag %s\\\", tag_name)\\n                delete_image_tag(quay_token, namespace, name, tag_name)\\n        else:\\n            LOGGER.debug(\\\"%s is not in a known type to be deleted.\\\", tag_name)\\n\\n\\ndef process_repositories(repos: List[ImageRepo], quay_token: str, dry_run: bool = False) -\\u003e None:\\n    for repo in repos:\\n        namespace = repo[\\\"namespace\\\"]\\n        name = repo[\\\"name\\\"]\\n        # skip huge repository for which we can't get all tags\\n        if name == \\\"ocp-art-tenant/art-images\\\":\\n            continue\\n        LOGGER.info(\\\"Processing repository %s: %s/%s\\\", next(processed_repos_counter), namespace, name)\\n\\n        all_tags = get_quay_tags(quay_token, namespace, name)\\n\\n        if not all_tags:\\n            continue\\n\\n        remove_tags(all_tags, quay_token, namespace, name, dry_run=dry_run)\\n\\n\\ndef fetch_image_repos(access_token: str, namespace: str) -\\u003e Iterator[List[ImageRepo]]:\\n    next_page = None\\n    resp: HTTPResponse\\n    while True:\\n        query_args = {\\\"namespace\\\": namespace}\\n        if next_page is not None:\\n            query_args[\\\"next_page\\\"] = next_page\\n\\n        api_url = f\\\"{QUAY_API_URL}/repository?{urlencode(query_args)}\\\"\\n        request = Request(api_url, headers={\\n            \\\"Authorization\\\": f\\\"Bearer {access_token}\\\",\\n        })\\n\\n        try:\\n            with urlopen(request) as resp:\\n                if resp.status != 200:\\n                    raise RuntimeError(resp.reason)\\n                json_data = json.loads(resp.read())\\n        except json.JSONDecodeError:\\n            LOGGER.info(\\\"Json decoder error, will retry\\\")\\n            continue\\n\\n        repos = json_data.get(\\\"repositories\\\", [])\\n        if not repos:\\n            LOGGER.debug(\\\"No image repository is found.\\\")\\n            break\\n\\n        yield repos\\n\\n        if (next_page := json_data.get(\\\"next_page\\\", None)) is None:\\n            break\\n\\n\\ndef main():\\n    token = os.getenv(\\\"QUAY_TOKEN\\\")\\n    if not token:\\n        raise ValueError(\\\"The token required for access to Quay API is missing!\\\")\\n\\n    args = parse_args()\\n\\n    for image_repos in fetch_image_repos(token, args.namespace):\\n        process_repositories(image_repos, token, dry_run=args.dry_run)\\n\\n\\ndef parse_args():\\n    parser = argparse.ArgumentParser()\\n    parser.add_argument(\\\"--namespace\\\", required=True)\\n    parser.add_argument(\\\"--dry-run\\\", action=\\\"store_true\\\")\\n    args = parser.parse_args()\\n    return args\\n\\n\\nif __name__ == \\\"__main__\\\":\\n    main()\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"name\":\"image-controller-image-pruner-configmap-mmk4bf6tc8\",\"namespace\":\"image-controller\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:26:34Z",
                "name": "image-controller-image-pruner-configmap-mmk4bf6tc8",
                "namespace": "image-controller",
                "resourceVersion": "3479",
                "uid": "bf43d75f-4510-4dc3-8fcb-5acada78eb5c"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "reset_notifications.py": "import argparse\nimport itertools\nimport json\nimport logging\nimport os\n\nfrom collections.abc import Iterator\nfrom http.client import HTTPResponse\nfrom typing import Any, Dict, List\nfrom urllib.error import HTTPError\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nlogging.basicConfig(\n    format=\"%(asctime)s - %(levelname)s - %(message)s\", level=logging.INFO\n)\nLOGGER = logging.getLogger(__name__)\nQUAY_API_URL = \"https://quay.io/api/v1\"\n\nprocessed_repos_counter = itertools.count()\n\nImageRepo = Dict[str, Any]\nRepoNotification = Dict[str, Any]\n\n\ndef get_quay_notifications(\n    quay_token: str, namespace: str, name: str\n) -\u003e List[RepoNotification]:\n    \"\"\"\n    Get all notifications for a repository\n    Quay API response format:\n    {\n        \"notifications\": [\n            {\n                \"uuid\": \"string\",\n                \"number_of_failures\": int,\n                \"title\": \"string\",\n                ...\n            },\n            ...\n        ]\n    }\n    \"\"\"\n    resp: HTTPResponse\n\n    api_url = f\"{QUAY_API_URL}/repository/{namespace}/{name}/notification/\"\n    request = Request(\n        api_url,\n        headers={\n            \"Authorization\": f\"Bearer {quay_token}\",\n        },\n    )\n\n    try:\n        with urlopen(request) as resp:\n            if resp.status != 200:\n                # do not fail the job if we can't fetch notifications\n                # for single repository\n                LOGGER.warning(\"Failed to fetch notifications for %s/%s\", namespace, name)\n                json_data = {}\n            else:\n                json_data = json.loads(resp.read())\n    except HTTPError:\n        LOGGER.warning(\"Failed to fetch notifications for %s/%s\", namespace, name)\n        json_data = {}\n\n    return json_data.get(\"notifications\", [])\n\n\ndef reset_notification(uuid: str, quay_token: str, namespace: str, name: str) -\u003e None:\n    \"\"\"Reset notification by notification uuid\"\"\"\n    api_url = f\"{QUAY_API_URL}/repository/{namespace}/{name}/notification/{uuid}\"\n    request = Request(\n        api_url,\n        method=\"POST\",\n        headers={\n            \"Authorization\": f\"Bearer {quay_token}\",\n        },\n    )\n    resp: HTTPResponse\n    try:\n        with urlopen(request) as resp:\n            # The actual API response is 204 for notification reset\n            # There is bug in Quay Swagger docs generator\n            # claiming all POST request return 201\n            if resp.status not in (201, 204):\n                # do not fail the job if we can't reset notification\n                LOGGER.warning(\n                    \"Failed to reset notification %s from %s/%s\",\n                    uuid,\n                    namespace,\n                    name,\n                )\n    except HTTPError as ex:\n        # Quay API returns 400 if notification is not found\n        # filter out when this is the case\n        rsp_message = json.loads(ex.read()).get(\"detail\", \"\")\n        if ex.status == 400 and rsp_message.startswith(\n            \"No repository notification found\"\n        ):\n            LOGGER.info(\n                \"Notification %s from %s/%s was not found\", uuid, namespace, name\n            )\n        else:\n            LOGGER.warning(\n                \"Failed to reset notification %s from %s/%s with error: %s\",\n                uuid,\n                namespace,\n                name,\n                rsp_message,\n            )\n\n\ndef process_repositories(\n    repos: List[ImageRepo], quay_token: str, dry_run: bool = False\n) -\u003e None:\n    \"\"\"Process all repositories and reset notifications if needed\"\"\"\n    for repo in repos:\n        namespace = repo[\"namespace\"]\n        name = repo[\"name\"]\n        LOGGER.info(\n            \"Processing repository %s: %s/%s\",\n            next(processed_repos_counter),\n            namespace,\n            name,\n        )\n        all_notifications = get_quay_notifications(quay_token, namespace, name)\n\n        if not all_notifications:\n            continue\n\n        for notification in all_notifications:\n            notification_title = notification.get(\"title\", \"\")\n            uuid = notification[\"uuid\"]\n            if notification.get(\"number_of_failures\", 0) \u003e 0:\n                if dry_run:\n                    LOGGER.info(\n                        \"Notification %s with title %s from %s/%s should be reset\",\n                        uuid,\n                        notification_title,\n                        namespace,\n                        name,\n                    )\n                else:\n                    reset_notification(uuid, quay_token, namespace, name)\n                    LOGGER.info(\n                        \"Notification %s with title %s from %s/%s was reset\",\n                        uuid,\n                        notification_title,\n                        namespace,\n                        name,\n                    )\n            else:\n                LOGGER.info(\n                    \"Notification %s with title %s from %s/%s has no failures\",\n                    uuid,\n                    notification_title,\n                    namespace,\n                    name,\n                )\n\n\ndef fetch_image_repos(access_token: str, namespace: str) -\u003e Iterator[List[ImageRepo]]:\n    \"\"\"Fetch all image repositories for a given namespace\"\"\"\n    next_page = None\n    resp: HTTPResponse\n    retry = 0\n    while True:\n        query_args = {\"namespace\": namespace}\n        if next_page is not None:\n            query_args[\"next_page\"] = next_page\n\n        api_url = f\"{QUAY_API_URL}/repository?{urlencode(query_args)}\"\n        request = Request(\n            api_url,\n            headers={\n                \"Authorization\": f\"Bearer {access_token}\",\n            },\n        )\n        try:\n            with urlopen(request) as resp:\n                if resp.status == 200:\n                    json_data = json.loads(resp.read())\n                else:\n                    # this will raise error for 2xx other than 200\n                    # urlopen raises HTTPError for all non 2xx responses\n                    raise HTTPError(resp.reason)\n        except HTTPError as ex:\n            # retry 5 times before giving up\n            if retry \u003c 5:\n                retry += 1\n                continue\n            else:\n                LOGGER.error(\n                    \"Unable to fetch repositories for namespace %s\",\n                    namespace,\n                )\n                raise RuntimeError(ex)\n\n        repos = json_data.get(\"repositories\", [])\n        if not repos:\n            LOGGER.debug(\"No image repository is found.\")\n            break\n\n        yield repos\n\n        if (next_page := json_data.get(\"next_page\", None)) is None:\n            break\n\n\ndef main():\n    token = os.getenv(\"QUAY_TOKEN\")\n    if not token:\n        raise ValueError(\"The token required for access to Quay API is missing!\")\n\n    args = parse_args()\n    for image_repos in fetch_image_repos(token, args.namespace):\n        process_repositories(image_repos, token, dry_run=args.dry_run)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--namespace\", required=True)\n    parser.add_argument(\"--dry-run\", action=\"store_true\")\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    main()\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"reset_notifications.py\":\"import argparse\\nimport itertools\\nimport json\\nimport logging\\nimport os\\n\\nfrom collections.abc import Iterator\\nfrom http.client import HTTPResponse\\nfrom typing import Any, Dict, List\\nfrom urllib.error import HTTPError\\nfrom urllib.parse import urlencode\\nfrom urllib.request import Request, urlopen\\n\\nlogging.basicConfig(\\n    format=\\\"%(asctime)s - %(levelname)s - %(message)s\\\", level=logging.INFO\\n)\\nLOGGER = logging.getLogger(__name__)\\nQUAY_API_URL = \\\"https://quay.io/api/v1\\\"\\n\\nprocessed_repos_counter = itertools.count()\\n\\nImageRepo = Dict[str, Any]\\nRepoNotification = Dict[str, Any]\\n\\n\\ndef get_quay_notifications(\\n    quay_token: str, namespace: str, name: str\\n) -\\u003e List[RepoNotification]:\\n    \\\"\\\"\\\"\\n    Get all notifications for a repository\\n    Quay API response format:\\n    {\\n        \\\"notifications\\\": [\\n            {\\n                \\\"uuid\\\": \\\"string\\\",\\n                \\\"number_of_failures\\\": int,\\n                \\\"title\\\": \\\"string\\\",\\n                ...\\n            },\\n            ...\\n        ]\\n    }\\n    \\\"\\\"\\\"\\n    resp: HTTPResponse\\n\\n    api_url = f\\\"{QUAY_API_URL}/repository/{namespace}/{name}/notification/\\\"\\n    request = Request(\\n        api_url,\\n        headers={\\n            \\\"Authorization\\\": f\\\"Bearer {quay_token}\\\",\\n        },\\n    )\\n\\n    try:\\n        with urlopen(request) as resp:\\n            if resp.status != 200:\\n                # do not fail the job if we can't fetch notifications\\n                # for single repository\\n                LOGGER.warning(\\\"Failed to fetch notifications for %s/%s\\\", namespace, name)\\n                json_data = {}\\n            else:\\n                json_data = json.loads(resp.read())\\n    except HTTPError:\\n        LOGGER.warning(\\\"Failed to fetch notifications for %s/%s\\\", namespace, name)\\n        json_data = {}\\n\\n    return json_data.get(\\\"notifications\\\", [])\\n\\n\\ndef reset_notification(uuid: str, quay_token: str, namespace: str, name: str) -\\u003e None:\\n    \\\"\\\"\\\"Reset notification by notification uuid\\\"\\\"\\\"\\n    api_url = f\\\"{QUAY_API_URL}/repository/{namespace}/{name}/notification/{uuid}\\\"\\n    request = Request(\\n        api_url,\\n        method=\\\"POST\\\",\\n        headers={\\n            \\\"Authorization\\\": f\\\"Bearer {quay_token}\\\",\\n        },\\n    )\\n    resp: HTTPResponse\\n    try:\\n        with urlopen(request) as resp:\\n            # The actual API response is 204 for notification reset\\n            # There is bug in Quay Swagger docs generator\\n            # claiming all POST request return 201\\n            if resp.status not in (201, 204):\\n                # do not fail the job if we can't reset notification\\n                LOGGER.warning(\\n                    \\\"Failed to reset notification %s from %s/%s\\\",\\n                    uuid,\\n                    namespace,\\n                    name,\\n                )\\n    except HTTPError as ex:\\n        # Quay API returns 400 if notification is not found\\n        # filter out when this is the case\\n        rsp_message = json.loads(ex.read()).get(\\\"detail\\\", \\\"\\\")\\n        if ex.status == 400 and rsp_message.startswith(\\n            \\\"No repository notification found\\\"\\n        ):\\n            LOGGER.info(\\n                \\\"Notification %s from %s/%s was not found\\\", uuid, namespace, name\\n            )\\n        else:\\n            LOGGER.warning(\\n                \\\"Failed to reset notification %s from %s/%s with error: %s\\\",\\n                uuid,\\n                namespace,\\n                name,\\n                rsp_message,\\n            )\\n\\n\\ndef process_repositories(\\n    repos: List[ImageRepo], quay_token: str, dry_run: bool = False\\n) -\\u003e None:\\n    \\\"\\\"\\\"Process all repositories and reset notifications if needed\\\"\\\"\\\"\\n    for repo in repos:\\n        namespace = repo[\\\"namespace\\\"]\\n        name = repo[\\\"name\\\"]\\n        LOGGER.info(\\n            \\\"Processing repository %s: %s/%s\\\",\\n            next(processed_repos_counter),\\n            namespace,\\n            name,\\n        )\\n        all_notifications = get_quay_notifications(quay_token, namespace, name)\\n\\n        if not all_notifications:\\n            continue\\n\\n        for notification in all_notifications:\\n            notification_title = notification.get(\\\"title\\\", \\\"\\\")\\n            uuid = notification[\\\"uuid\\\"]\\n            if notification.get(\\\"number_of_failures\\\", 0) \\u003e 0:\\n                if dry_run:\\n                    LOGGER.info(\\n                        \\\"Notification %s with title %s from %s/%s should be reset\\\",\\n                        uuid,\\n                        notification_title,\\n                        namespace,\\n                        name,\\n                    )\\n                else:\\n                    reset_notification(uuid, quay_token, namespace, name)\\n                    LOGGER.info(\\n                        \\\"Notification %s with title %s from %s/%s was reset\\\",\\n                        uuid,\\n                        notification_title,\\n                        namespace,\\n                        name,\\n                    )\\n            else:\\n                LOGGER.info(\\n                    \\\"Notification %s with title %s from %s/%s has no failures\\\",\\n                    uuid,\\n                    notification_title,\\n                    namespace,\\n                    name,\\n                )\\n\\n\\ndef fetch_image_repos(access_token: str, namespace: str) -\\u003e Iterator[List[ImageRepo]]:\\n    \\\"\\\"\\\"Fetch all image repositories for a given namespace\\\"\\\"\\\"\\n    next_page = None\\n    resp: HTTPResponse\\n    retry = 0\\n    while True:\\n        query_args = {\\\"namespace\\\": namespace}\\n        if next_page is not None:\\n            query_args[\\\"next_page\\\"] = next_page\\n\\n        api_url = f\\\"{QUAY_API_URL}/repository?{urlencode(query_args)}\\\"\\n        request = Request(\\n            api_url,\\n            headers={\\n                \\\"Authorization\\\": f\\\"Bearer {access_token}\\\",\\n            },\\n        )\\n        try:\\n            with urlopen(request) as resp:\\n                if resp.status == 200:\\n                    json_data = json.loads(resp.read())\\n                else:\\n                    # this will raise error for 2xx other than 200\\n                    # urlopen raises HTTPError for all non 2xx responses\\n                    raise HTTPError(resp.reason)\\n        except HTTPError as ex:\\n            # retry 5 times before giving up\\n            if retry \\u003c 5:\\n                retry += 1\\n                continue\\n            else:\\n                LOGGER.error(\\n                    \\\"Unable to fetch repositories for namespace %s\\\",\\n                    namespace,\\n                )\\n                raise RuntimeError(ex)\\n\\n        repos = json_data.get(\\\"repositories\\\", [])\\n        if not repos:\\n            LOGGER.debug(\\\"No image repository is found.\\\")\\n            break\\n\\n        yield repos\\n\\n        if (next_page := json_data.get(\\\"next_page\\\", None)) is None:\\n            break\\n\\n\\ndef main():\\n    token = os.getenv(\\\"QUAY_TOKEN\\\")\\n    if not token:\\n        raise ValueError(\\\"The token required for access to Quay API is missing!\\\")\\n\\n    args = parse_args()\\n    for image_repos in fetch_image_repos(token, args.namespace):\\n        process_repositories(image_repos, token, dry_run=args.dry_run)\\n\\n\\ndef parse_args():\\n    parser = argparse.ArgumentParser()\\n    parser.add_argument(\\\"--namespace\\\", required=True)\\n    parser.add_argument(\\\"--dry-run\\\", action=\\\"store_true\\\")\\n    args = parser.parse_args()\\n    return args\\n\\n\\nif __name__ == \\\"__main__\\\":\\n    main()\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"name\":\"image-controller-notification-resetter-configmap-tfm9h79698\",\"namespace\":\"image-controller\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:26:34Z",
                "name": "image-controller-notification-resetter-configmap-tfm9h79698",
                "namespace": "image-controller",
                "resourceVersion": "3482",
                "uid": "583a2f69-38cd-4a81-9525-85bf488ddda3"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:26:33Z",
                "name": "kube-root-ca.crt",
                "namespace": "image-controller",
                "resourceVersion": "3455",
                "uid": "bb23d788-67f7-4159-a247-bd56420cee18"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:21:33Z",
                "name": "kube-root-ca.crt",
                "namespace": "kube-node-lease",
                "resourceVersion": "363",
                "uid": "83f7ba82-a9b5-4823-baf5-7d1f83e9815c"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "jws-kubeconfig-abcdef": "eyJhbGciOiJIUzI1NiIsImtpZCI6ImFiY2RlZiJ9..EsRCuSz2Wc8eyKWybCaj09KmAme7Gm6HiFaOIXWkGyQ",
                "kubeconfig": "apiVersion: v1\nclusters:\n- cluster:\n    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJSVQrZUMwTElQczB3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TmpBek1UZ3hPREUyTVRCYUZ3MHpOakF6TVRVeE9ESXhNVEJhTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUURyTkVpRVlJMjFNZW12WlBmRGlVYjhyYXoyNnBIaHd3T0JTbWN0VUIvR1pKR0M2dnUwblU3cmZSZlUKcDJDb2NLbFZuV21CYVhiL1cxZkMwMlZqYi80eUhYL2dabkRVczRGZDlvNWFnbm5wZ3RybFpCekxaR2dadEhrOApjKzRNSXI1TmZFT01URDNvelJ1TVdaaEFmcy9DM0VqYWlVWWM3RTFBaDBkQnh1N3UrOUd3M0RSR1FPZ0M4aVlQCkVNS0diaUoyZjZITHVWdVhncHplSlc1OTd4U2VxNlVWdmMydWU3TUZLeWlWb3ZLSkRjZ1BLc09vUGhCZ2JxVFgKMmZsaGpBOUsyVzhtM0J0blFwZXNmc1RUYVhRYkhaWFltUGZzL0pVNjN4UVdDNWM3YTZYVmRhSnN5SUpmKzNGQwp1aEZvRVJWTThMOWFBZytQcldMbWorWm9PTlJWQWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJRUDZYeFdtb1Zqa1YvT1lra1QyelcyVDh6b1JEQVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQ2EvY3NNR21hTgpzSVFXaldyUENHTS9VM0xHaUM3VjBhaVFKZEZ0R0RBTHpIWVBDUnJOaWFRcHFoNjRPRnFsdGJ4K2ZyQnlZRmRDCm1CWUdQRENGcVhhSWlZV1R4Nkl0OXdvZGxMR0R3eC9OL3FIaE1Xd2t4Zm80TWhRU2REWHR3SkNlQUIwaXAyTVMKNDJTTkhHeUdxeDJCZmRuWDBqVWtkVUpkQTRJTDJiWjJKSXJZUkl2RFN1SkJybmpjaXdFV1U4QnBZL1ZnVkFEYgpVemxZNFFocVZXRDFHQmYvMHFRVU5GZmgzMVlLbGlBT2YxS3ozR1JRR3RJRkZ6bTNNVHNhV0p1VWlCcHFhMTQxCkNvbmNRS01Nb20wWGlEY0pkSFVwempKVG5Td2JoaGo2SHVySmJRT0JQbjFWNlRRcDB3dHZpTVhnQUo1OXFGaG8KY1cwelNvZ0hGVi9RCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K\n    server: https://kind-mapt-control-plane:6443\n  name: \"\"\ncontexts: null\ncurrent-context: \"\"\nkind: Config\npreferences: {}\nusers: null\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:25Z",
                "name": "cluster-info",
                "namespace": "kube-public",
                "resourceVersion": "368",
                "uid": "6bca03ab-d537-4831-adb7-6d46ef0c6330"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:21:33Z",
                "name": "kube-root-ca.crt",
                "namespace": "kube-public",
                "resourceVersion": "364",
                "uid": "3814497d-2bf0-486d-8713-3731800909cc"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "Corefile": ".:53 {\n    errors\n    health {\n       lameduck 5s\n    }\n    ready\n    kubernetes cluster.local in-addr.arpa ip6.arpa {\n       pods insecure\n       fallthrough in-addr.arpa ip6.arpa\n       ttl 30\n    }\n    prometheus :9153\n    forward . /etc/resolv.conf {\n       max_concurrent 1000\n    }\n    cache 30 {\n       disable success cluster.local\n       disable denial cluster.local\n    }\n    loop\n    reload\n    loadbalance\n}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:25Z",
                "name": "coredns",
                "namespace": "kube-system",
                "resourceVersion": "253",
                "uid": "04743967-5f5d-4050-abbe-27c34161487b"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "client-ca-file": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n",
                "requestheader-allowed-names": "[\"front-proxy-client\"]",
                "requestheader-client-ca-file": "-----BEGIN CERTIFICATE-----\nMIIDETCCAfmgAwIBAgIIEnDXWD66cDAwDQYJKoZIhvcNAQELBQAwGTEXMBUGA1UE\nAxMOZnJvbnQtcHJveHktY2EwHhcNMjYwMzE4MTgxNjEwWhcNMzYwMzE1MTgyMTEw\nWjAZMRcwFQYDVQQDEw5mcm9udC1wcm94eS1jYTCCASIwDQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBAOeEI96XqDekbUrer8NLk2MFeh1GfsvNaxAIKI/+izSG1PR/\nrmhom1ONOxarg7ngR43iraWwlmF7LDhTBUCZkZXtIeFAkeCNX6ooEjTSUwrHoYlQ\nE8dG9WJ4+rgnLLkE9EEwEmohYRijxXNekBUdFucL+p8IwZ9PJlwSOt4kjUKSpB+R\nKlOJjbXzL+5pIbCbR1Nj2M2xjAsrBrgQyEuhOV8nWjv3SYhKimxjo8jdYuqJNpg3\nb80r3q/EF9AdJag5Ufsi14x5eoRN2bkCTwpSc7C7J1s7K+BXq4ywTaSnxtAPlOne\n1kN070Dwu7Q4g9CeKQxw50tTbLzp6N3j2BYpyV8CAwEAAaNdMFswDgYDVR0PAQH/\nBAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJF0Mn6T06AqwEQUc+os\nU3bMzaYfMBkGA1UdEQQSMBCCDmZyb250LXByb3h5LWNhMA0GCSqGSIb3DQEBCwUA\nA4IBAQBvYfA5j/k/6t1TlHYA+H68g5niJfrlBgPi6NrEGnONCDElj230NbUz4DMW\nKn2B3Oca2/i2Fped6tKyEv5pTP2Ljp1nvKSlQjXQWS3eP2y1YMY2lw61RsZJig/k\n1vJwKzvlIBW63xI8LLlxX46mxWiTmLOg8pl+8f/0eIxrt4v6Gkubgdfcjy87N1/Y\naH1IDnqALBAauskpdgqNhRBopIBeNSenJYEmDuZoGThaINXlO1qN7ic7MiSerLj2\nmlAHe2le/H5CSkbKbXKrHk2in+bKtWb1vlK30V6Rfe4g1WKUHa/GatUU/O/YoVoN\n5HEnxMZvD2v+2EDdkLp+5pwRRssj\n-----END CERTIFICATE-----\n",
                "requestheader-extra-headers-prefix": "[\"X-Remote-Extra-\"]",
                "requestheader-group-headers": "[\"X-Remote-Group\"]",
                "requestheader-username-headers": "[\"X-Remote-User\"]"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:23Z",
                "name": "extension-apiserver-authentication",
                "namespace": "kube-system",
                "resourceVersion": "20",
                "uid": "f3e990e6-a327-4178-8ce2-9534db240ec8"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "since": "2026-03-18"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:23Z",
                "name": "kube-apiserver-legacy-service-account-token-tracking",
                "namespace": "kube-system",
                "resourceVersion": "42",
                "uid": "c416b0bc-0532-4ab4-8c40-f9a6afc354d8"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "config.conf": "apiVersion: kubeproxy.config.k8s.io/v1alpha1\nbindAddress: 0.0.0.0\nbindAddressHardFail: false\nclientConnection:\n  acceptContentTypes: \"\"\n  burst: 0\n  contentType: \"\"\n  kubeconfig: /var/lib/kube-proxy/kubeconfig.conf\n  qps: 0\nclusterCIDR: 10.244.0.0/16\nconfigSyncPeriod: 0s\nconntrack:\n  maxPerCore: 0\n  min: null\n  tcpBeLiberal: false\n  tcpCloseWaitTimeout: null\n  tcpEstablishedTimeout: null\n  udpStreamTimeout: 0s\n  udpTimeout: 0s\ndetectLocal:\n  bridgeInterface: \"\"\n  interfaceNamePrefix: \"\"\ndetectLocalMode: \"\"\nenableProfiling: false\nhealthzBindAddress: \"\"\nhostnameOverride: \"\"\niptables:\n  localhostNodePorts: null\n  masqueradeAll: false\n  masqueradeBit: null\n  minSyncPeriod: 1s\n  syncPeriod: 0s\nipvs:\n  excludeCIDRs: null\n  minSyncPeriod: 0s\n  scheduler: \"\"\n  strictARP: false\n  syncPeriod: 0s\n  tcpFinTimeout: 0s\n  tcpTimeout: 0s\n  udpTimeout: 0s\nkind: KubeProxyConfiguration\nlogging:\n  flushFrequency: 0\n  options:\n    json:\n      infoBufferSize: \"0\"\n    text:\n      infoBufferSize: \"0\"\n  verbosity: 0\nmetricsBindAddress: \"\"\nmode: iptables\nnftables:\n  masqueradeAll: false\n  masqueradeBit: null\n  minSyncPeriod: 0s\n  syncPeriod: 0s\nnodePortAddresses: null\noomScoreAdj: null\nportRange: \"\"\nshowHiddenMetricsForVersion: \"\"\nwinkernel:\n  enableDSR: false\n  forwardHealthCheckVip: false\n  networkName: \"\"\n  rootHnsEndpointName: \"\"\n  sourceVip: \"\"",
                "kubeconfig.conf": "apiVersion: v1\nkind: Config\nclusters:\n- cluster:\n    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt\n    server: https://kind-mapt-control-plane:6443\n  name: default\ncontexts:\n- context:\n    cluster: default\n    namespace: default\n    user: default\n  name: default\ncurrent-context: default\nusers:\n- name: default\n  user:\n    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:25Z",
                "labels": {
                    "app": "kube-proxy"
                },
                "name": "kube-proxy",
                "namespace": "kube-system",
                "resourceVersion": "271",
                "uid": "6b0e34c7-34f2-4797-9d4f-ffc411c8c6d4"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:21:33Z",
                "name": "kube-root-ca.crt",
                "namespace": "kube-system",
                "resourceVersion": "365",
                "uid": "95f3fb58-ae65-4e25-8e9e-92a549504ebd"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ClusterConfiguration": "apiServer:\n  certSANs:\n  - 52.38.97.40\n  extraArgs:\n  - name: runtime-config\n    value: \"\"\napiVersion: kubeadm.k8s.io/v1beta4\ncaCertificateValidityPeriod: 87600h0m0s\ncertificateValidityPeriod: 8760h0m0s\ncertificatesDir: /etc/kubernetes/pki\nclusterName: kind-mapt\ncontrolPlaneEndpoint: kind-mapt-control-plane:6443\ncontrollerManager:\n  extraArgs:\n  - name: enable-hostpath-provisioner\n    value: \"true\"\ndns: {}\nencryptionAlgorithm: RSA-2048\netcd:\n  local:\n    dataDir: /var/lib/etcd\nimageRepository: registry.k8s.io\nkind: ClusterConfiguration\nkubernetesVersion: v1.32.5\nnetworking:\n  dnsDomain: cluster.local\n  podSubnet: 10.244.0.0/16\n  serviceSubnet: 10.96.0.0/16\nproxy: {}\nscheduler: {}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:25Z",
                "name": "kubeadm-config",
                "namespace": "kube-system",
                "resourceVersion": "236",
                "uid": "10fa56ff-5e6b-4c54-bd5c-f11a693d481b"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "kubelet": "apiVersion: kubelet.config.k8s.io/v1beta1\nauthentication:\n  anonymous:\n    enabled: false\n  webhook:\n    cacheTTL: 0s\n    enabled: true\n  x509:\n    clientCAFile: /etc/kubernetes/pki/ca.crt\nauthorization:\n  mode: Webhook\n  webhook:\n    cacheAuthorizedTTL: 0s\n    cacheUnauthorizedTTL: 0s\ncgroupDriver: systemd\ncgroupRoot: /kubelet\nclusterDNS:\n- 10.96.0.10\nclusterDomain: cluster.local\ncontainerRuntimeEndpoint: \"\"\ncpuManagerReconcilePeriod: 0s\ncrashLoopBackOff: {}\nevictionHard:\n  imagefs.available: 0%\n  nodefs.available: 0%\n  nodefs.inodesFree: 0%\nevictionPressureTransitionPeriod: 0s\nfailSwapOn: false\nfileCheckFrequency: 0s\nhealthzBindAddress: 127.0.0.1\nhealthzPort: 10248\nhttpCheckFrequency: 0s\nimageGCHighThresholdPercent: 100\nimageMaximumGCAge: 0s\nimageMinimumGCAge: 0s\nkind: KubeletConfiguration\nlogging:\n  flushFrequency: 0\n  options:\n    json:\n      infoBufferSize: \"0\"\n    text:\n      infoBufferSize: \"0\"\n  verbosity: 0\nmemorySwap: {}\nnodeStatusReportFrequency: 0s\nnodeStatusUpdateFrequency: 0s\nrotateCertificates: true\nruntimeRequestTimeout: 0s\nshutdownGracePeriod: 0s\nshutdownGracePeriodCriticalPods: 0s\nstaticPodPath: /etc/kubernetes/manifests\nstreamingConnectionIdleTimeout: 0s\nsyncFrequency: 0s\nvolumeStatsAggPeriod: 0s\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "creationTimestamp": "2026-03-18T18:21:25Z",
                "name": "kubelet-config",
                "namespace": "kube-system",
                "resourceVersion": "239",
                "uid": "9ae993c1-f513-40b4-8cb8-cb34612d635f"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:21:33Z",
                "name": "kube-root-ca.crt",
                "namespace": "local-path-storage",
                "resourceVersion": "366",
                "uid": "b78e77b7-319b-4e23-ada1-1ceb3d44f3d4"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "config.json": "{\n        \"nodePathMap\":[\n        {\n                \"node\":\"DEFAULT_PATH_FOR_NON_LISTED_NODES\",\n                \"paths\":[\"/var/local-path-provisioner\"]\n        }\n        ]\n}",
                "helperPod.yaml": "apiVersion: v1\nkind: Pod\nmetadata:\n  name: helper-pod\nspec:\n  priorityClassName: system-node-critical\n  tolerations:\n    - key: node.kubernetes.io/disk-pressure\n      operator: Exists\n      effect: NoSchedule\n  containers:\n  - name: helper-pod\n    image: docker.io/kindest/local-path-helper:v20241212-8ac705d0\n    imagePullPolicy: IfNotPresent",
                "setup": "#!/bin/sh\nset -eu\nmkdir -m 0777 -p \"$VOL_DIR\"",
                "teardown": "#!/bin/sh\nset -eu\nrm -rf \"$VOL_DIR\""
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"config.json\":\"{\\n        \\\"nodePathMap\\\":[\\n        {\\n                \\\"node\\\":\\\"DEFAULT_PATH_FOR_NON_LISTED_NODES\\\",\\n                \\\"paths\\\":[\\\"/var/local-path-provisioner\\\"]\\n        }\\n        ]\\n}\",\"helperPod.yaml\":\"apiVersion: v1\\nkind: Pod\\nmetadata:\\n  name: helper-pod\\nspec:\\n  priorityClassName: system-node-critical\\n  tolerations:\\n    - key: node.kubernetes.io/disk-pressure\\n      operator: Exists\\n      effect: NoSchedule\\n  containers:\\n  - name: helper-pod\\n    image: docker.io/kindest/local-path-helper:v20241212-8ac705d0\\n    imagePullPolicy: IfNotPresent\",\"setup\":\"#!/bin/sh\\nset -eu\\nmkdir -m 0777 -p \\\"$VOL_DIR\\\"\",\"teardown\":\"#!/bin/sh\\nset -eu\\nrm -rf \\\"$VOL_DIR\\\"\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"name\":\"local-path-config\",\"namespace\":\"local-path-storage\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:21:27Z",
                "name": "local-path-config",
                "namespace": "local-path-storage",
                "resourceVersion": "299",
                "uid": "dcdf84bf-b2e2-426e-8828-94a57b7571db"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:25:24Z",
                "name": "kube-root-ca.crt",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2706",
                "uid": "3aa39adc-a771-4064-95c9-d9a7036aff62"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "loglevel.pac-watcher": "info",
                "loglevel.pipelines-as-code-webhook": "info",
                "loglevel.pipelinesascode": "info",
                "zap-logger-config": "{\n  \"level\": \"info\",\n  \"development\": false,\n  \"sampling\": {\n    \"initial\": 100,\n    \"thereafter\": 100\n  },\n  \"outputPaths\": [\"stdout\"],\n  \"errorOutputPaths\": [\"stderr\"],\n  \"encoding\": \"json\",\n  \"encoderConfig\": {\n    \"timeKey\": \"ts\",\n    \"levelKey\": \"level\",\n    \"nameKey\": \"logger\",\n    \"callerKey\": \"caller\",\n    \"messageKey\": \"msg\",\n    \"stacktraceKey\": \"stacktrace\",\n    \"lineEnding\": \"\",\n    \"levelEncoder\": \"\",\n    \"timeEncoder\": \"iso8601\",\n    \"durationEncoder\": \"\",\n    \"callerEncoder\": \"\"\n  }\n}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"loglevel.pac-watcher\":\"info\",\"loglevel.pipelines-as-code-webhook\":\"info\",\"loglevel.pipelinesascode\":\"info\",\"zap-logger-config\":\"{\\n  \\\"level\\\": \\\"info\\\",\\n  \\\"development\\\": false,\\n  \\\"sampling\\\": {\\n    \\\"initial\\\": 100,\\n    \\\"thereafter\\\": 100\\n  },\\n  \\\"outputPaths\\\": [\\\"stdout\\\"],\\n  \\\"errorOutputPaths\\\": [\\\"stderr\\\"],\\n  \\\"encoding\\\": \\\"json\\\",\\n  \\\"encoderConfig\\\": {\\n    \\\"timeKey\\\": \\\"ts\\\",\\n    \\\"levelKey\\\": \\\"level\\\",\\n    \\\"nameKey\\\": \\\"logger\\\",\\n    \\\"callerKey\\\": \\\"caller\\\",\\n    \\\"messageKey\\\": \\\"msg\\\",\\n    \\\"stacktraceKey\\\": \\\"stacktrace\\\",\\n    \\\"lineEnding\\\": \\\"\\\",\\n    \\\"levelEncoder\\\": \\\"\\\",\\n    \\\"timeEncoder\\\": \\\"iso8601\\\",\\n    \\\"durationEncoder\\\": \\\"\\\",\\n    \\\"callerEncoder\\\": \\\"\\\"\\n  }\\n}\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"app.kubernetes.io/part-of\":\"pipelines-as-code\"},\"name\":\"pac-config-logging\",\"namespace\":\"pipelines-as-code\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:25:27Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "pipelines-as-code"
                },
                "name": "pac-config-logging",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2899",
                "uid": "3e180c67-52c9-4fac-ba91-8a5926d15d8f"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n# lease-duration is how long non-leaders will wait to try to acquire the\\n# lock; 15 seconds is the value used by core kubernetes controllers.\\nlease-duration: \\\"60s\\\"\\n# renew-deadline is how long a leader will try to renew the lease before\\n# giving up; 10 seconds is the value used by core kubernetes controllers.\\nrenew-deadline: \\\"40s\\\"\\n# retry-period is how long the leader election client waits between tries of\\n# actions; 2 seconds is the value used by core kubernetes controllers.\\nretry-period: \\\"10s\\\"\\n# buckets is the number of buckets used to partition key space of each\\n# Reconciler. If this number is M and the replica number of the controller\\n# is N, the N replicas will compete for the M buckets. The owner of a\\n# bucket will take care of the reconciling for the keys partitioned into\\n# that bucket.\\nbuckets: \\\"1\\\"\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"app.kubernetes.io/part-of\":\"pipelines-as-code\"},\"name\":\"pac-watcher-config-leader-election\",\"namespace\":\"pipelines-as-code\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:25:27Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "pipelines-as-code"
                },
                "name": "pac-watcher-config-leader-election",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2908",
                "uid": "c92b0ac9-e438-4663-aa36-d8351b73f2f3"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n# lease-duration is how long non-leaders will wait to try to acquire the\\n# lock; 15 seconds is the value used by core kubernetes controllers.\\nlease-duration: \\\"60s\\\"\\n# renew-deadline is how long a leader will try to renew the lease before\\n# giving up; 10 seconds is the value used by core kubernetes controllers.\\nrenew-deadline: \\\"40s\\\"\\n# retry-period is how long the leader election client waits between tries of\\n# actions; 2 seconds is the value used by core kubernetes controllers.\\nretry-period: \\\"10s\\\"\\n# buckets is the number of buckets used to partition key space of each\\n# Reconciler. If this number is M and the replica number of the controller\\n# is N, the N replicas will compete for the M buckets. The owner of a\\n# bucket will take care of the reconciling for the keys partitioned into\\n# that bucket.\\nbuckets: \\\"1\\\"\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"app.kubernetes.io/part-of\":\"pipelines-as-code\"},\"name\":\"pac-webhook-config-leader-election\",\"namespace\":\"pipelines-as-code\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:25:27Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "pipelines-as-code"
                },
                "name": "pac-webhook-config-leader-election",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2910",
                "uid": "537c95d7-7f44-4299-8df8-dc53adc1c83b"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "application-name": "Local Konflux",
                "auto-configure-new-github-repo": "false",
                "auto-configure-repo-namespace-template": "",
                "auto-configure-repo-repository-template": "",
                "bitbucket-cloud-additional-source-ip": "",
                "bitbucket-cloud-check-source-ip": "true",
                "custom-console-name": "Local Konflux",
                "custom-console-url": "https://52.38.97.40:9443",
                "custom-console-url-namespace": "https://52.38.97.40:9443/ns/{{ namespace }}",
                "custom-console-url-pr-details": "https://52.38.97.40:9443/ns/{{ namespace }}/pipelinerun/{{ pr }}",
                "custom-console-url-pr-tasklog": "https://52.38.97.40:9443/ns/{{ namespace }}/pipelinerun/{{ pr }}/logs/{{ task }}",
                "default-max-keep-runs": "",
                "enable-cancel-in-progress-on-pull-requests": "false",
                "enable-cancel-in-progress-on-push": "false",
                "error-detection-from-container-logs": "true",
                "error-detection-max-number-of-lines": "50",
                "error-detection-simple-regexp": "^(?P\u003cfilename\u003e[^:]*):(?P\u003cline\u003e[0-9]+):(?P\u003ccolumn\u003e[0-9]+)?([ ]*)?(?P\u003cerror\u003e.*)",
                "error-log-snippet": "true",
                "error-log-snippet-number-of-lines": "3",
                "hub-catalog-type": "artifacthub",
                "hub-url": "https://artifacthub.io",
                "max-keep-run-upper-limit": "",
                "remember-ok-to-test": "false",
                "remote-tasks": "true",
                "require-ok-to-test-sha": "false",
                "secret-auto-create": "true",
                "secret-github-app-scope-extra-repos": "",
                "secret-github-app-token-scoped": "true",
                "skip-push-event-for-pr-commits": "true",
                "tekton-dashboard-url": ""
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"application-name\":\"Local Konflux\",\"auto-configure-new-github-repo\":\"false\",\"auto-configure-repo-namespace-template\":\"\",\"auto-configure-repo-repository-template\":\"\",\"bitbucket-cloud-additional-source-ip\":\"\",\"bitbucket-cloud-check-source-ip\":\"true\",\"custom-console-name\":\"Local Konflux\",\"custom-console-url\":\"https://52.38.97.40:9443\",\"custom-console-url-namespace\":\"https://52.38.97.40:9443/ns/{{ namespace }}\",\"custom-console-url-pr-details\":\"https://52.38.97.40:9443/ns/{{ namespace }}/pipelinerun/{{ pr }}\",\"custom-console-url-pr-tasklog\":\"https://52.38.97.40:9443/ns/{{ namespace }}/pipelinerun/{{ pr }}/logs/{{ task }}\",\"default-max-keep-runs\":\"\",\"enable-cancel-in-progress-on-pull-requests\":\"false\",\"enable-cancel-in-progress-on-push\":\"false\",\"error-detection-from-container-logs\":\"true\",\"error-detection-max-number-of-lines\":\"50\",\"error-detection-simple-regexp\":\"^(?P\\u003cfilename\\u003e[^:]*):(?P\\u003cline\\u003e[0-9]+):(?P\\u003ccolumn\\u003e[0-9]+)?([ ]*)?(?P\\u003cerror\\u003e.*)\",\"error-log-snippet\":\"true\",\"error-log-snippet-number-of-lines\":\"3\",\"hub-catalog-type\":\"artifacthub\",\"hub-url\":\"https://artifacthub.io\",\"max-keep-run-upper-limit\":\"\",\"remember-ok-to-test\":\"false\",\"remote-tasks\":\"true\",\"require-ok-to-test-sha\":\"false\",\"secret-auto-create\":\"true\",\"secret-github-app-scope-extra-repos\":\"\",\"secret-github-app-token-scoped\":\"true\",\"skip-push-event-for-pr-commits\":\"true\",\"tekton-dashboard-url\":\"\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/part-of\":\"pipelines-as-code\",\"app.kubernetes.io/version\":\"v0.42.0\"},\"name\":\"pipelines-as-code\",\"namespace\":\"pipelines-as-code\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:25:28Z",
                "labels": {
                    "app.kubernetes.io/part-of": "pipelines-as-code",
                    "app.kubernetes.io/version": "v0.42.0"
                },
                "name": "pipelines-as-code",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2926",
                "uid": "dd0e178c-f94e-43ed-8192-2d6237fba035"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# metrics.backend-destination field specifies the system metrics destination.\n# It supports either prometheus (the default) or stackdriver.\n# Note: Using Stackdriver will incur additional charges.\nmetrics.backend-destination: prometheus\n# metrics.stackdriver-project-id field specifies the Stackdriver project ID. This\n# field is optional. When running on GCE, application default credentials will be\n# used and metrics will be sent to the cluster's project if this field is\n# not provided.\nmetrics.stackdriver-project-id: \"\u003cyour stackdriver project id\u003e\"\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed\n# to send metrics to Stackdriver using \"global\" resource type and custom\n# metric type. Setting this flag to \"true\" could cause extra Stackdriver\n# charge.  If metrics.backend-destination is not Stackdriver, this is\n# ignored.\nmetrics.allow-stackdriver-custom-metrics: \"false\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n# metrics.backend-destination field specifies the system metrics destination.\\n# It supports either prometheus (the default) or stackdriver.\\n# Note: Using Stackdriver will incur additional charges.\\nmetrics.backend-destination: prometheus\\n# metrics.stackdriver-project-id field specifies the Stackdriver project ID. This\\n# field is optional. When running on GCE, application default credentials will be\\n# used and metrics will be sent to the cluster's project if this field is\\n# not provided.\\nmetrics.stackdriver-project-id: \\\"\\u003cyour stackdriver project id\\u003e\\\"\\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed\\n# to send metrics to Stackdriver using \\\"global\\\" resource type and custom\\n# metric type. Setting this flag to \\\"true\\\" could cause extra Stackdriver\\n# charge.  If metrics.backend-destination is not Stackdriver, this is\\n# ignored.\\nmetrics.allow-stackdriver-custom-metrics: \\\"false\\\"\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/part-of\":\"pipelines-as-code\",\"app.kubernetes.io/version\":\"v0.42.0\"},\"name\":\"pipelines-as-code-config-observability\",\"namespace\":\"pipelines-as-code\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:25:28Z",
                "labels": {
                    "app.kubernetes.io/part-of": "pipelines-as-code",
                    "app.kubernetes.io/version": "v0.42.0"
                },
                "name": "pipelines-as-code-config-observability",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2956",
                "uid": "3cf518f8-4cbd-4195-b7d9-e25d9f546a6e"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "controller-url": "",
                "provider": "",
                "version": "v0.42.0"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"controller-url\":\"\",\"provider\":\"\",\"version\":\"v0.42.0\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/part-of\":\"pipelines-as-code\",\"app.kubernetes.io/version\":\"v0.42.0\"},\"name\":\"pipelines-as-code-info\",\"namespace\":\"pipelines-as-code\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:25:28Z",
                "labels": {
                    "app.kubernetes.io/part-of": "pipelines-as-code",
                    "app.kubernetes.io/version": "v0.42.0"
                },
                "name": "pipelines-as-code-info",
                "namespace": "pipelines-as-code",
                "resourceVersion": "2971",
                "uid": "cc0e4b5c-3182-485a-a422-cd4b0edc86f7"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# Common configuration for all Knative codebase\nzap-logger-config: |\n  {\n    \"level\": \"info\",\n    \"development\": false,\n    \"outputPaths\": [\"stdout\"],\n    \"errorOutputPaths\": [\"stderr\"],\n    \"encoding\": \"json\",\n    \"encoderConfig\": {\n      \"timeKey\": \"ts\",\n      \"levelKey\": \"level\",\n      \"nameKey\": \"logger\",\n      \"callerKey\": \"caller\",\n      \"messageKey\": \"msg\",\n      \"stacktraceKey\": \"stacktrace\",\n      \"lineEnding\": \"\",\n      \"levelEncoder\": \"\",\n      \"timeEncoder\": \"iso8601\",\n      \"durationEncoder\": \"\",\n      \"callerEncoder\": \"\"\n    }\n  }\n",
                "loglevel.tekton-operator-cluster-operations": "info",
                "loglevel.tekton-operator-lifecycle": "info",
                "loglevel.tekton-operator-webhook": "info",
                "zap-logger-config": "{\n  \"level\": \"info\",\n  \"development\": false,\n  \"sampling\": {\n    \"initial\": 100,\n    \"thereafter\": 100\n  },\n  \"outputPaths\": [\"stdout\"],\n  \"errorOutputPaths\": [\"stderr\"],\n  \"encoding\": \"json\",\n  \"encoderConfig\": {\n    \"timeKey\": \"timestamp\",\n    \"levelKey\": \"level\",\n    \"nameKey\": \"logger\",\n    \"callerKey\": \"caller\",\n    \"messageKey\": \"msg\",\n    \"stacktraceKey\": \"stacktrace\",\n    \"lineEnding\": \"\",\n    \"levelEncoder\": \"\",\n    \"timeEncoder\": \"iso8601\",\n    \"durationEncoder\": \"\",\n    \"callerEncoder\": \"\"\n  }\n}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n\\n# Common configuration for all Knative codebase\\nzap-logger-config: |\\n  {\\n    \\\"level\\\": \\\"info\\\",\\n    \\\"development\\\": false,\\n    \\\"outputPaths\\\": [\\\"stdout\\\"],\\n    \\\"errorOutputPaths\\\": [\\\"stderr\\\"],\\n    \\\"encoding\\\": \\\"json\\\",\\n    \\\"encoderConfig\\\": {\\n      \\\"timeKey\\\": \\\"ts\\\",\\n      \\\"levelKey\\\": \\\"level\\\",\\n      \\\"nameKey\\\": \\\"logger\\\",\\n      \\\"callerKey\\\": \\\"caller\\\",\\n      \\\"messageKey\\\": \\\"msg\\\",\\n      \\\"stacktraceKey\\\": \\\"stacktrace\\\",\\n      \\\"lineEnding\\\": \\\"\\\",\\n      \\\"levelEncoder\\\": \\\"\\\",\\n      \\\"timeEncoder\\\": \\\"iso8601\\\",\\n      \\\"durationEncoder\\\": \\\"\\\",\\n      \\\"callerEncoder\\\": \\\"\\\"\\n    }\\n  }\\n\",\"loglevel.tekton-operator-cluster-operations\":\"info\",\"loglevel.tekton-operator-lifecycle\":\"info\",\"loglevel.tekton-operator-webhook\":\"info\",\"zap-logger-config\":\"{\\n  \\\"level\\\": \\\"info\\\",\\n  \\\"development\\\": false,\\n  \\\"sampling\\\": {\\n    \\\"initial\\\": 100,\\n    \\\"thereafter\\\": 100\\n  },\\n  \\\"outputPaths\\\": [\\\"stdout\\\"],\\n  \\\"errorOutputPaths\\\": [\\\"stderr\\\"],\\n  \\\"encoding\\\": \\\"json\\\",\\n  \\\"encoderConfig\\\": {\\n    \\\"timeKey\\\": \\\"timestamp\\\",\\n    \\\"levelKey\\\": \\\"level\\\",\\n    \\\"nameKey\\\": \\\"logger\\\",\\n    \\\"callerKey\\\": \\\"caller\\\",\\n    \\\"messageKey\\\": \\\"msg\\\",\\n    \\\"stacktraceKey\\\": \\\"stacktrace\\\",\\n    \\\"lineEnding\\\": \\\"\\\",\\n    \\\"levelEncoder\\\": \\\"\\\",\\n    \\\"timeEncoder\\\": \\\"iso8601\\\",\\n    \\\"durationEncoder\\\": \\\"\\\",\\n    \\\"callerEncoder\\\": \\\"\\\"\\n  }\\n}\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"operator.tekton.dev/release\":\"v0.79.0\"},\"name\":\"config-logging\",\"namespace\":\"tekton-operator\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:23:18Z",
                "labels": {
                    "operator.tekton.dev/release": "v0.79.0"
                },
                "name": "config-logging",
                "namespace": "tekton-operator",
                "resourceVersion": "1049",
                "uid": "17918b51-d714-4b47-9e64-c98c69c1fd77"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:23:15Z",
                "name": "kube-root-ca.crt",
                "namespace": "tekton-operator",
                "resourceVersion": "986",
                "uid": "192b86f4-36e2-48f6-9db1-ece6aafa7a37"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "AUTOINSTALL_COMPONENTS": "true",
                "DEFAULT_TARGET_NAMESPACE": "tekton-pipelines"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"AUTOINSTALL_COMPONENTS\":\"true\",\"DEFAULT_TARGET_NAMESPACE\":\"tekton-pipelines\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"operator.tekton.dev/release\":\"v0.79.0\"},\"name\":\"tekton-config-defaults\",\"namespace\":\"tekton-operator\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:23:18Z",
                "labels": {
                    "operator.tekton.dev/release": "v0.79.0"
                },
                "name": "tekton-config-defaults",
                "namespace": "tekton-operator",
                "resourceVersion": "1050",
                "uid": "06831932-881d-4860-ac51-b44a9aef5c44"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# metrics.backend-destination field specifies the system metrics destination.\n# It supports either prometheus (the default) or stackdriver.\n# Note: Using Stackdriver will incur additional charges.\nmetrics.backend-destination: prometheus\n# metrics.stackdriver-project-id field specifies the Stackdriver project ID. This\n# field is optional. When running on GCE, application default credentials will be\n# used and metrics will be sent to the cluster's project if this field is\n# not provided.\nmetrics.stackdriver-project-id: \"\u003cyour stackdriver project id\u003e\"\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed\n# to send metrics to Stackdriver using \"global\" resource type and custom\n# metric type. Setting this flag to \"true\" could cause extra Stackdriver\n# charge.  If metrics.backend-destination is not Stackdriver, this is\n# ignored.\nmetrics.allow-stackdriver-custom-metrics: \"false\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n# metrics.backend-destination field specifies the system metrics destination.\\n# It supports either prometheus (the default) or stackdriver.\\n# Note: Using Stackdriver will incur additional charges.\\nmetrics.backend-destination: prometheus\\n# metrics.stackdriver-project-id field specifies the Stackdriver project ID. This\\n# field is optional. When running on GCE, application default credentials will be\\n# used and metrics will be sent to the cluster's project if this field is\\n# not provided.\\nmetrics.stackdriver-project-id: \\\"\\u003cyour stackdriver project id\\u003e\\\"\\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed\\n# to send metrics to Stackdriver using \\\"global\\\" resource type and custom\\n# metric type. Setting this flag to \\\"true\\\" could cause extra Stackdriver\\n# charge.  If metrics.backend-destination is not Stackdriver, this is\\n# ignored.\\nmetrics.allow-stackdriver-custom-metrics: \\\"false\\\"\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\"},\"name\":\"tekton-config-observability\",\"namespace\":\"tekton-operator\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:23:19Z",
                "labels": {
                    "app.kubernetes.io/instance": "default"
                },
                "name": "tekton-config-observability",
                "namespace": "tekton-operator",
                "resourceVersion": "1051",
                "uid": "a0c28543-f501-4759-b08a-e4c1f352ed83"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n# lease-duration is how long non-leaders will wait to try to acquire the\\n# lock; 15 seconds is the value used by core kubernetes controllers.\\nlease-duration: \\\"60s\\\"\\n# renew-deadline is how long a leader will try to renew the lease before\\n# giving up; 10 seconds is the value used by core kubernetes controllers.\\nrenew-deadline: \\\"40s\\\"\\n# retry-period is how long the leader election client waits between tries of\\n# actions; 2 seconds is the value used by core kubernetes controllers.\\nretry-period: \\\"10s\\\"\\n# buckets is the number of buckets used to partition key space of each\\n# Reconciler. If this number is M and the replica number of the controller\\n# is N, the N replicas will compete for the M buckets. The owner of a\\n# bucket will take care of the reconciling for the keys partitioned into\\n# that bucket.\\nbuckets: \\\"1\\\"\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"operator.tekton.dev/release\":\"v0.79.0\"},\"name\":\"tekton-operator-controller-config-leader-election\",\"namespace\":\"tekton-operator\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:23:19Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "operator.tekton.dev/release": "v0.79.0"
                },
                "name": "tekton-operator-controller-config-leader-election",
                "namespace": "tekton-operator",
                "resourceVersion": "1053",
                "uid": "0ce23249-568d-4474-9974-7fbcd9ba3cda"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "version": "v0.79.0"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"version\":\"v0.79.0\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\"},\"name\":\"tekton-operator-info\",\"namespace\":\"tekton-operator\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:23:19Z",
                "labels": {
                    "app.kubernetes.io/instance": "default"
                },
                "name": "tekton-operator-info",
                "namespace": "tekton-operator",
                "resourceVersion": "1055",
                "uid": "0ada8ea4-2f1e-42d3-8b14-cb1809d23b3b"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n# lease-duration is how long non-leaders will wait to try to acquire the\\n# lock; 15 seconds is the value used by core kubernetes controllers.\\nlease-duration: \\\"60s\\\"\\n# renew-deadline is how long a leader will try to renew the lease before\\n# giving up; 10 seconds is the value used by core kubernetes controllers.\\nrenew-deadline: \\\"40s\\\"\\n# retry-period is how long the leader election client waits between tries of\\n# actions; 2 seconds is the value used by core kubernetes controllers.\\nretry-period: \\\"10s\\\"\\n# buckets is the number of buckets used to partition key space of each\\n# Reconciler. If this number is M and the replica number of the controller\\n# is N, the N replicas will compete for the M buckets. The owner of a\\n# bucket will take care of the reconciling for the keys partitioned into\\n# that bucket.\\nbuckets: \\\"1\\\"\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{},\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"operator.tekton.dev/release\":\"devel\"},\"name\":\"tekton-operator-webhook-config-leader-election\",\"namespace\":\"tekton-operator\"}}\n"
                },
                "creationTimestamp": "2026-03-18T18:23:19Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "operator.tekton.dev/release": "devel"
                },
                "name": "tekton-operator-webhook-config-leader-election",
                "namespace": "tekton-operator",
                "resourceVersion": "1056",
                "uid": "4be3e04c-fe72-4a2d-8c72-e85b880179c8"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "default-kind": "task",
                "default-service-account": "default"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "c995bbcc662f914838eaf89a69043b3ace6738b8a22bd848f2f471b8ae5216cc"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "bundleresolver-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1321",
                "uid": "f55ae3c8-7f27-4c07-a09d-5f9ee8af7590"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "artifacts.oci.format": "simplesigning",
                "artifacts.oci.storage": "oci",
                "artifacts.pipelinerun.enable-deep-inspection": "true",
                "artifacts.pipelinerun.format": "in-toto",
                "artifacts.pipelinerun.storage": "oci",
                "artifacts.taskrun.format": "in-toto",
                "artifacts.taskrun.storage": "",
                "performance": "disable-ha: false\n",
                "transparency.enabled": "false"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "848b1f2ec8086fa074773918d3af2a55af28919e4f5a0624508c5650acda5aa9"
                },
                "creationTimestamp": "2026-03-18T18:25:24Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-chains",
                    "operator.tekton.dev/operand-name": "tektoncd-chains"
                },
                "name": "chains-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "chain-config-qrsrg",
                        "uid": "4653b76d-6af4-4eaa-9ccf-53aa4aa5baf4"
                    }
                ],
                "resourceVersion": "2729",
                "uid": "4c2fd07b-919f-47ae-becd-1249640dc07c"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "version": "v0.26.2"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "52b7dc54215fe63d278418389016fd97246a9de880a38e86ed63d4f3e7f7cd3f"
                },
                "creationTimestamp": "2026-03-18T18:24:35Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-chains",
                    "operator.tekton.dev/operand-name": "tektoncd-chains"
                },
                "name": "chains-info",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "chain-knk88",
                        "uid": "ec66fa87-5eeb-4c50-9492-98e66143cbee"
                    }
                ],
                "resourceVersion": "2056",
                "uid": "55bfa398-ed94-43cc-80b1-f377a046f034"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "allowed-namespaces": "",
                "blocked-namespaces": "",
                "default-kind": "task",
                "default-namespace": ""
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "1d97d5f8d219b9770f8504de9071270323093b8b7003b2ab6c40c1daf3fa4d47"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "cluster-resolver-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1322",
                "uid": "337d0918-51f2-4693-9058-d1f2c719be5b"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# default-timeout-minutes contains the default number of\n# minutes to use for TaskRun and PipelineRun, if none is specified.\ndefault-timeout-minutes: \"60\"  # 60 minutes\n\n# default-service-account contains the default service account name\n# to use for TaskRun and PipelineRun, if none is specified.\ndefault-service-account: \"default\"\n\n# default-managed-by-label-value contains the default value given to the\n# \"app.kubernetes.io/managed-by\" label applied to all Pods created for\n# TaskRuns. If a user's requested TaskRun specifies another value for this\n# label, the user's request supercedes.\ndefault-managed-by-label-value: \"tekton-pipelines\"\n\n# default-pod-template contains the default pod template to use for\n# TaskRun and PipelineRun. If a pod template is specified on the\n# PipelineRun, the default-pod-template is merged with that one.\n# default-pod-template:\n\n# default-affinity-assistant-pod-template contains the default pod template\n# to use for affinity assistant pods. If a pod template is specified on the\n# PipelineRun, the default-affinity-assistant-pod-template is merged with\n# that one.\n# default-affinity-assistant-pod-template:\n\n# default-cloud-events-sink contains the default CloudEvents sink to be\n# used for TaskRun and PipelineRun, when no sink is specified.\n# Note that right now it is still not possible to set a PipelineRun or\n# TaskRun specific sink, so the default is the only option available.\n# If no sink is specified, no CloudEvent is generated\n# default-cloud-events-sink:\n\n# default-task-run-workspace-binding contains the default workspace\n# configuration provided for any Workspaces that a Task declares\n# but that a TaskRun does not explicitly provide.\n# default-task-run-workspace-binding: |\n#   emptyDir: {}\n\n# default-max-matrix-combinations-count contains the default maximum number\n# of combinations from a Matrix, if none is specified.\ndefault-max-matrix-combinations-count: \"256\"\n\n# default-forbidden-env contains comma seperated environment variables that cannot be\n# overridden by podTemplate.\ndefault-forbidden-env:\n\n# default-resolver-type contains the default resolver type to be used in the cluster,\n# no default-resolver-type is specified by default\ndefault-resolver-type:\n\n# default-imagepullbackoff-timeout contains the default duration to wait\n# before requeuing the TaskRun to retry, specifying 0 here is equivalent to fail fast\n# possible values could be 1m, 5m, 10s, 1h, etc\n# default-imagepullbackoff-timeout: \"5m\"\n\n# default-maximum-resolution-timeout specifies the default duration used by the\n# resolution controller before timing out when exceeded.\n# Possible values include \"1m\", \"5m\", \"10s\", \"1h\", etc.\n# Example: default-maximum-resolution-timeout: \"1m\"\n\n# default-container-resource-requirements allow users to update default resource requirements\n# to a init-containers and containers of a pods create by the controller\n# Onet: All the resource requirements are applied to init-containers and containers\n# only if the existing resource requirements are empty.\n# default-container-resource-requirements: |\n#   place-scripts: # updates resource requirements of a 'place-scripts' container\n#     requests:\n#       memory: \"64Mi\"\n#       cpu: \"250m\"\n#     limits:\n#       memory: \"128Mi\"\n#       cpu: \"500m\"\n#\n#   prepare: # updates resource requirements of a 'prepare' container\n#     requests:\n#       memory: \"64Mi\"\n#       cpu: \"250m\"\n#     limits:\n#       memory: \"256Mi\"\n#       cpu: \"500m\"\n#\n#   working-dir-initializer: # updates resource requirements of a 'working-dir-initializer' container\n#     requests:\n#       memory: \"64Mi\"\n#       cpu: \"250m\"\n#     limits:\n#       memory: \"512Mi\"\n#       cpu: \"500m\"\n#\n#   prefix-scripts: # updates resource requirements of containers which starts with 'scripts-'\n#     requests:\n#       memory: \"64Mi\"\n#       cpu: \"250m\"\n#     limits:\n#       memory: \"128Mi\"\n#       cpu: \"500m\"\n#\n#   prefix-sidecar-scripts: # updates resource requirements of containers which starts with 'sidecar-scripts-'\n#     requests:\n#       memory: \"64Mi\"\n#       cpu: \"250m\"\n#     limits:\n#       memory: \"128Mi\"\n#       cpu: \"500m\"\n#\n#   default: # updates resource requirements of init-containers and containers which has empty resource requirements\n#     requests:\n#       memory: \"64Mi\"\n#       cpu: \"250m\"\n#     limits:\n#       memory: \"256Mi\"\n#       cpu: \"500m\"\n\n# default-sidecar-log-polling-interval specifies the polling interval for the Tekton sidecar log results container.\n# This controls how frequently the sidecar checks for step completion files written by steps in a TaskRun.\n# Lower values (e.g., \"10ms\") make the sidecar more responsive but may increase CPU usage; higher values (e.g., \"1s\")\n# reduce resource usage but may delay result collection.\n# This value is used by the sidecar-tekton-log-results container and can be tuned for performance or test scenarios.\n# Example values: \"100ms\", \"500ms\", \"1s\"\ndefault-sidecar-log-polling-interval: \"100ms\"\n\n# default-step-ref-concurrency-limit specifies the concurrency limit for resolving step references.\n# This setting controls the maximum number of concurrent goroutines used to resolve\n# step references (`step.ref` fields) simultaneously. This limit acts as a throttle\n# to prevent overwhelming remote servers (e.g., git providers, OCI registries) or\n# the Kubernetes API server, especially when a TaskRun contains many steps that\n# reference StepActions.\ndefault-step-ref-concurrency-limit: \"5\"\n",
                "default-service-account": "default"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n\\n# default-timeout-minutes contains the default number of\\n# minutes to use for TaskRun and PipelineRun, if none is specified.\\ndefault-timeout-minutes: \\\"60\\\"  # 60 minutes\\n\\n# default-service-account contains the default service account name\\n# to use for TaskRun and PipelineRun, if none is specified.\\ndefault-service-account: \\\"default\\\"\\n\\n# default-managed-by-label-value contains the default value given to the\\n# \\\"app.kubernetes.io/managed-by\\\" label applied to all Pods created for\\n# TaskRuns. If a user's requested TaskRun specifies another value for this\\n# label, the user's request supercedes.\\ndefault-managed-by-label-value: \\\"tekton-pipelines\\\"\\n\\n# default-pod-template contains the default pod template to use for\\n# TaskRun and PipelineRun. If a pod template is specified on the\\n# PipelineRun, the default-pod-template is merged with that one.\\n# default-pod-template:\\n\\n# default-affinity-assistant-pod-template contains the default pod template\\n# to use for affinity assistant pods. If a pod template is specified on the\\n# PipelineRun, the default-affinity-assistant-pod-template is merged with\\n# that one.\\n# default-affinity-assistant-pod-template:\\n\\n# default-cloud-events-sink contains the default CloudEvents sink to be\\n# used for TaskRun and PipelineRun, when no sink is specified.\\n# Note that right now it is still not possible to set a PipelineRun or\\n# TaskRun specific sink, so the default is the only option available.\\n# If no sink is specified, no CloudEvent is generated\\n# default-cloud-events-sink:\\n\\n# default-task-run-workspace-binding contains the default workspace\\n# configuration provided for any Workspaces that a Task declares\\n# but that a TaskRun does not explicitly provide.\\n# default-task-run-workspace-binding: |\\n#   emptyDir: {}\\n\\n# default-max-matrix-combinations-count contains the default maximum number\\n# of combinations from a Matrix, if none is specified.\\ndefault-max-matrix-combinations-count: \\\"256\\\"\\n\\n# default-forbidden-env contains comma seperated environment variables that cannot be\\n# overridden by podTemplate.\\ndefault-forbidden-env:\\n\\n# default-resolver-type contains the default resolver type to be used in the cluster,\\n# no default-resolver-type is specified by default\\ndefault-resolver-type:\\n\\n# default-imagepullbackoff-timeout contains the default duration to wait\\n# before requeuing the TaskRun to retry, specifying 0 here is equivalent to fail fast\\n# possible values could be 1m, 5m, 10s, 1h, etc\\n# default-imagepullbackoff-timeout: \\\"5m\\\"\\n\\n# default-maximum-resolution-timeout specifies the default duration used by the\\n# resolution controller before timing out when exceeded.\\n# Possible values include \\\"1m\\\", \\\"5m\\\", \\\"10s\\\", \\\"1h\\\", etc.\\n# Example: default-maximum-resolution-timeout: \\\"1m\\\"\\n\\n# default-container-resource-requirements allow users to update default resource requirements\\n# to a init-containers and containers of a pods create by the controller\\n# Onet: All the resource requirements are applied to init-containers and containers\\n# only if the existing resource requirements are empty.\\n# default-container-resource-requirements: |\\n#   place-scripts: # updates resource requirements of a 'place-scripts' container\\n#     requests:\\n#       memory: \\\"64Mi\\\"\\n#       cpu: \\\"250m\\\"\\n#     limits:\\n#       memory: \\\"128Mi\\\"\\n#       cpu: \\\"500m\\\"\\n#\\n#   prepare: # updates resource requirements of a 'prepare' container\\n#     requests:\\n#       memory: \\\"64Mi\\\"\\n#       cpu: \\\"250m\\\"\\n#     limits:\\n#       memory: \\\"256Mi\\\"\\n#       cpu: \\\"500m\\\"\\n#\\n#   working-dir-initializer: # updates resource requirements of a 'working-dir-initializer' container\\n#     requests:\\n#       memory: \\\"64Mi\\\"\\n#       cpu: \\\"250m\\\"\\n#     limits:\\n#       memory: \\\"512Mi\\\"\\n#       cpu: \\\"500m\\\"\\n#\\n#   prefix-scripts: # updates resource requirements of containers which starts with 'scripts-'\\n#     requests:\\n#       memory: \\\"64Mi\\\"\\n#       cpu: \\\"250m\\\"\\n#     limits:\\n#       memory: \\\"128Mi\\\"\\n#       cpu: \\\"500m\\\"\\n#\\n#   prefix-sidecar-scripts: # updates resource requirements of containers which starts with 'sidecar-scripts-'\\n#     requests:\\n#       memory: \\\"64Mi\\\"\\n#       cpu: \\\"250m\\\"\\n#     limits:\\n#       memory: \\\"128Mi\\\"\\n#       cpu: \\\"500m\\\"\\n#\\n#   default: # updates resource requirements of init-containers and containers which has empty resource requirements\\n#     requests:\\n#       memory: \\\"64Mi\\\"\\n#       cpu: \\\"250m\\\"\\n#     limits:\\n#       memory: \\\"256Mi\\\"\\n#       cpu: \\\"500m\\\"\\n\\n# default-sidecar-log-polling-interval specifies the polling interval for the Tekton sidecar log results container.\\n# This controls how frequently the sidecar checks for step completion files written by steps in a TaskRun.\\n# Lower values (e.g., \\\"10ms\\\") make the sidecar more responsive but may increase CPU usage; higher values (e.g., \\\"1s\\\")\\n# reduce resource usage but may delay result collection.\\n# This value is used by the sidecar-tekton-log-results container and can be tuned for performance or test scenarios.\\n# Example values: \\\"100ms\\\", \\\"500ms\\\", \\\"1s\\\"\\ndefault-sidecar-log-polling-interval: \\\"100ms\\\"\\n\\n# default-step-ref-concurrency-limit specifies the concurrency limit for resolving step references.\\n# This setting controls the maximum number of concurrent goroutines used to resolve\\n# step references (`step.ref` fields) simultaneously. This limit acts as a throttle\\n# to prevent overwhelming remote servers (e.g., git providers, OCI registries) or\\n# the Kubernetes API server, especially when a TaskRun contains many steps that\\n# reference StepActions.\\ndefault-step-ref-concurrency-limit: \\\"5\\\"\\n\",\"default-service-account\":\"default\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{\"operator.tekton.dev/last-applied-hash\":\"09f4e816c058c1e8c5993ec84e034d2cf647c86689378bf58a5e6bfa7a1aad39\"},\"creationTimestamp\":null,\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"app.kubernetes.io/part-of\":\"tekton-pipelines\",\"operator.tekton.dev/operand-name\":\"tektoncd-pipelines\"},\"name\":\"config-defaults\",\"namespace\":\"tekton-pipelines\",\"ownerReferences\":[{\"apiVersion\":\"operator.tekton.dev/v1alpha1\",\"blockOwnerDeletion\":true,\"controller\":true,\"kind\":\"TektonInstallerSet\",\"name\":\"pipeline-main-static-v7nmp\",\"uid\":\"91f82231-cff3-4b1b-bbcf-47fc3d98b2b5\"}]}}\n",
                    "operator.tekton.dev/last-applied-hash": "09f4e816c058c1e8c5993ec84e034d2cf647c86689378bf58a5e6bfa7a1aad39"
                },
                "creationTimestamp": "2026-03-18T18:23:39Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-defaults",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "2865",
                "uid": "0c40f0cc-a50d-4d83-aa1d-6f0302e53b43"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# default-service-account contains the default service account name\n# to use for TaskRun and PipelineRun, if none is specified.\ndefault-service-account: \"default\"\ndefault-run-as-user: \"65532\"\ndefault-run-as-group: \"65532\"\ndefault-fs-group: \"65532\"\ndefault-run-as-non-root: \"true\" # allowed values are true and false\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "7d05a80722fab305ba6aff651a132d7a9d0f94bfff0c17b45886834a1b09f717"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-triggers",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "config-defaults-triggers",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1747",
                "uid": "41d446ce-95f0-4ba5-8f71-ba66d166049c"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# formats contains a comma seperated list of event formats to be used\n# the only format supported today is \"tektonv1\". An empty string is not\n# a valid configuration. To disable events, do not specify the sink.\nformats: \"tektonv1\"\n\n# sink contains the event sink to be used for TaskRun, PipelineRun and\n# CustomRun. If no sink is specified, no CloudEvent is generated.\n# This setting supercedes the \"default-cloud-events-sink\" from the\n# \"config-defaults\" config map\nsink: \"https://events.sink/cdevents\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "d81bf5c8d159b16854a3b9fe07312521bc6c81a8a686a6cb2b8b520ca17c253c"
                },
                "creationTimestamp": "2026-03-18T18:23:39Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-events",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1304",
                "uid": "40eac13d-a869-4088-b4d5-b6199109e8a7"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "03700008fda31818d9d2ff5dc1f135c880a4f61b6f6fdd27a760e42aeb46ba20"
                },
                "creationTimestamp": "2026-03-18T18:23:39Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-leader-election-controller",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1307",
                "uid": "2e5b2ff8-2154-44dc-bd8d-7a9f0bdc9674"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "6530015d17179160083747eb7ebb74c45db33d863bc3252b395d8e0e49569036"
                },
                "creationTimestamp": "2026-03-18T18:23:39Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-leader-election-events",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1308",
                "uid": "c66600f4-2379-45c9-b22d-86749cc8b1a9"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "4fc55c2f08bd82302cdb368de7eb1a1a9ba873881de520a655c7adebe66a8cdb"
                },
                "creationTimestamp": "2026-03-18T18:23:41Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-leader-election-resolvers",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1324",
                "uid": "16e1ba75-277c-4bfc-84a8-f26988d4e605"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "7385b5d50fbe7c3b20a552621daf1f8192aa31e275ab631b55c5215bac55349d"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-triggers",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "config-leader-election-triggers-controller",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1752",
                "uid": "fb43092b-3ffa-41ba-8c01-504bffd07ad6"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "dc088e5b2e0e2f992ad70aaf8aabc70894e26a4dcb93cfe6e8ecbbbe0142566d"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-triggers",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "config-leader-election-triggers-webhook",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1753",
                "uid": "c806e819-e27f-4195-97cc-3a6813b19db2"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "7d4bed4af92944399a03a308667a6421c6b4e75515f2ebd5eb54fbd160d954eb"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-leader-election-webhook",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1309",
                "uid": "44a30bf7-8316-4077-8c86-8b6aa90187f8"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "loglevel.controller": "info",
                "loglevel.webhook": "info",
                "zap-logger-config": "{\n  \"level\": \"info\",\n  \"development\": false,\n  \"sampling\": {\n    \"initial\": 100,\n    \"thereafter\": 100\n  },\n  \"outputPaths\": [\"stdout\"],\n  \"errorOutputPaths\": [\"stderr\"],\n  \"encoding\": \"json\",\n  \"encoderConfig\": {\n    \"timeKey\": \"ts\",\n    \"levelKey\": \"level\",\n    \"nameKey\": \"logger\",\n    \"callerKey\": \"caller\",\n    \"messageKey\": \"msg\",\n    \"stacktraceKey\": \"stacktrace\",\n    \"lineEnding\": \"\",\n    \"levelEncoder\": \"\",\n    \"timeEncoder\": \"iso8601\",\n    \"durationEncoder\": \"\",\n    \"callerEncoder\": \"\"\n  }\n}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"loglevel.controller\":\"info\",\"loglevel.webhook\":\"info\",\"zap-logger-config\":\"{\\n  \\\"level\\\": \\\"info\\\",\\n  \\\"development\\\": false,\\n  \\\"sampling\\\": {\\n    \\\"initial\\\": 100,\\n    \\\"thereafter\\\": 100\\n  },\\n  \\\"outputPaths\\\": [\\\"stdout\\\"],\\n  \\\"errorOutputPaths\\\": [\\\"stderr\\\"],\\n  \\\"encoding\\\": \\\"json\\\",\\n  \\\"encoderConfig\\\": {\\n    \\\"timeKey\\\": \\\"ts\\\",\\n    \\\"levelKey\\\": \\\"level\\\",\\n    \\\"nameKey\\\": \\\"logger\\\",\\n    \\\"callerKey\\\": \\\"caller\\\",\\n    \\\"messageKey\\\": \\\"msg\\\",\\n    \\\"stacktraceKey\\\": \\\"stacktrace\\\",\\n    \\\"lineEnding\\\": \\\"\\\",\\n    \\\"levelEncoder\\\": \\\"\\\",\\n    \\\"timeEncoder\\\": \\\"iso8601\\\",\\n    \\\"durationEncoder\\\": \\\"\\\",\\n    \\\"callerEncoder\\\": \\\"\\\"\\n  }\\n}\\n\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{\"operator.tekton.dev/last-applied-hash\":\"6b929b7e7d99e5d5f39dcada6abf16a6ad1d3a2aa3aab4584ac2c08b9ac09a5f\"},\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"app.kubernetes.io/part-of\":\"tekton-chains\",\"operator.tekton.dev/operand-name\":\"tektoncd-chains\"},\"name\":\"config-logging\",\"namespace\":\"tekton-pipelines\",\"ownerReferences\":[{\"apiVersion\":\"operator.tekton.dev/v1alpha1\",\"blockOwnerDeletion\":true,\"controller\":true,\"kind\":\"TektonInstallerSet\",\"name\":\"chain-knk88\",\"uid\":\"ec66fa87-5eeb-4c50-9492-98e66143cbee\"}]}}\n",
                    "operator.tekton.dev/last-applied-hash": "6b929b7e7d99e5d5f39dcada6abf16a6ad1d3a2aa3aab4584ac2c08b9ac09a5f"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-chains",
                    "operator.tekton.dev/operand-name": "tektoncd-chains"
                },
                "name": "config-logging",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "chain-knk88",
                        "uid": "ec66fa87-5eeb-4c50-9492-98e66143cbee"
                    }
                ],
                "resourceVersion": "3049",
                "uid": "11b90741-56ce-4872-84aa-8a8ee85876d0"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "loglevel.controller": "info",
                "loglevel.eventlistener": "info",
                "loglevel.webhook": "info",
                "zap-logger-config": "{\n  \"level\": \"info\",\n  \"development\": false,\n  \"disableStacktrace\": true,\n  \"sampling\": {\n    \"initial\": 100,\n    \"thereafter\": 100\n  },\n  \"outputPaths\": [\"stdout\"],\n  \"errorOutputPaths\": [\"stderr\"],\n  \"encoding\": \"json\",\n  \"encoderConfig\": {\n    \"timeKey\": \"timestamp\",\n    \"levelKey\": \"severity\",\n    \"nameKey\": \"logger\",\n    \"callerKey\": \"caller\",\n    \"messageKey\": \"message\",\n    \"stacktraceKey\": \"stacktrace\",\n    \"lineEnding\": \"\",\n    \"levelEncoder\": \"\",\n    \"timeEncoder\": \"iso8601\",\n    \"durationEncoder\": \"\",\n    \"callerEncoder\": \"\"\n  }\n}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "8eae89debf98472c6cb386165387b685318a238bb2992e894a3fdab35896c374"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-triggers",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "config-logging-triggers",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1754",
                "uid": "76d9af81-f07a-40d8-b09e-158b70566c59"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# OpenTelemetry Metrics Configuration\n# Protocol for metrics export (prometheus, grpc, http/protobuf, none)\n# Default if not specified: \"none\"\nmetrics-protocol: prometheus\n\n# Metrics endpoint (for grpc/http protocols)\n# Default: empty (uses default OTLP endpoint)\nmetrics-endpoint: \"\"\n\n# Metrics export interval (e.g., \"30s\", \"1m\")\n# Default: empty (uses default interval)\nmetrics-export-interval: \"\"\n\n# OpenTelemetry Tracing Configuration\n# Protocol for tracing export (grpc, http/protobuf, none, stdout)\n# Default: none\ntracing-protocol: none\n\n# Tracing endpoint (for grpc/http protocols)\n# Default: empty\ntracing-endpoint: \"\"\n\n# Tracing sampling rate (0.0 to 1.0)\n# Default: 1.0 (100% sampling)\ntracing-sampling-rate: \"1.0\"\n\n# Runtime Configuration\n# Enable profiling (enabled, disabled)\n# Default: disabled\nruntime-profiling: disabled\n\n# Runtime export interval (e.g., \"15s\")\n# Default: 15s\nruntime-export-interval: \"15s\"\n\n# Note: Legacy OpenCensus configuration (metrics.backend-destination, etc.) has been\n# removed as OpenCensus support is no longer provided by the underlying infrastructure.\n# Please use the OpenTelemetry configuration options above.\n",
                "metrics-protocol": "prometheus",
                "metrics.count.enable-reason": "false",
                "metrics.pipelinerun.duration-type": "histogram",
                "metrics.pipelinerun.level": "pipeline",
                "metrics.taskrun.duration-type": "histogram",
                "metrics.taskrun.level": "task"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"data\":{\"_example\":\"################################\\n#                              #\\n#    EXAMPLE CONFIGURATION     #\\n#                              #\\n################################\\n\\n# This block is not actually functional configuration,\\n# but serves to illustrate the available configuration\\n# options and document them in a way that is accessible\\n# to users that `kubectl edit` this config map.\\n#\\n# These sample configuration options may be copied out of\\n# this example block and unindented to be in the data block\\n# to actually change the configuration.\\n\\n# OpenTelemetry Metrics Configuration\\n# Protocol for metrics export (prometheus, grpc, http/protobuf, none)\\n# Default if not specified: \\\"none\\\"\\nmetrics-protocol: prometheus\\n\\n# Metrics endpoint (for grpc/http protocols)\\n# Default: empty (uses default OTLP endpoint)\\nmetrics-endpoint: \\\"\\\"\\n\\n# Metrics export interval (e.g., \\\"30s\\\", \\\"1m\\\")\\n# Default: empty (uses default interval)\\nmetrics-export-interval: \\\"\\\"\\n\\n# OpenTelemetry Tracing Configuration\\n# Protocol for tracing export (grpc, http/protobuf, none, stdout)\\n# Default: none\\ntracing-protocol: none\\n\\n# Tracing endpoint (for grpc/http protocols)\\n# Default: empty\\ntracing-endpoint: \\\"\\\"\\n\\n# Tracing sampling rate (0.0 to 1.0)\\n# Default: 1.0 (100% sampling)\\ntracing-sampling-rate: \\\"1.0\\\"\\n\\n# Runtime Configuration\\n# Enable profiling (enabled, disabled)\\n# Default: disabled\\nruntime-profiling: disabled\\n\\n# Runtime export interval (e.g., \\\"15s\\\")\\n# Default: 15s\\nruntime-export-interval: \\\"15s\\\"\\n\\n# Note: Legacy OpenCensus configuration (metrics.backend-destination, etc.) has been\\n# removed as OpenCensus support is no longer provided by the underlying infrastructure.\\n# Please use the OpenTelemetry configuration options above.\\n\",\"metrics-protocol\":\"prometheus\",\"metrics.count.enable-reason\":\"false\",\"metrics.pipelinerun.duration-type\":\"histogram\",\"metrics.pipelinerun.level\":\"pipeline\",\"metrics.taskrun.duration-type\":\"histogram\",\"metrics.taskrun.level\":\"task\"},\"kind\":\"ConfigMap\",\"metadata\":{\"annotations\":{\"operator.tekton.dev/last-applied-hash\":\"58e40a224b0c15e6efcbfd7027dc1b11f84152e78fe6f649acd51fa7685313a2\"},\"creationTimestamp\":null,\"labels\":{\"app.kubernetes.io/instance\":\"default\",\"app.kubernetes.io/part-of\":\"tekton-pipelines\",\"operator.tekton.dev/operand-name\":\"tektoncd-pipelines\"},\"name\":\"config-observability\",\"namespace\":\"tekton-pipelines\",\"ownerReferences\":[{\"apiVersion\":\"operator.tekton.dev/v1alpha1\",\"blockOwnerDeletion\":true,\"controller\":true,\"kind\":\"TektonInstallerSet\",\"name\":\"pipeline-main-static-v7nmp\",\"uid\":\"91f82231-cff3-4b1b-bbcf-47fc3d98b2b5\"}]}}\n",
                    "operator.tekton.dev/last-applied-hash": "58e40a224b0c15e6efcbfd7027dc1b11f84152e78fe6f649acd51fa7685313a2"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-observability",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "2925",
                "uid": "c86459ab-c10c-4396-a764-54eab334e432"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# metrics.backend-destination field specifies the system metrics destination.\n# It supports either prometheus (the default) or stackdriver.\n# Note: Using stackdriver will incur additional charges\nmetrics.backend-destination: prometheus\n\n# metrics.stackdriver-project-id field specifies the stackdriver project ID. This\n# field is optional. When running on GCE, application default credentials will be\n# used if this field is not provided.\nmetrics.stackdriver-project-id: \"\u003cyour stackdriver project id\u003e\"\n\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to\n# Stackdriver using \"global\" resource type and custom metric type if the\n# metrics are not supported by \"knative_revision\" resource type. Setting this\n# flag to \"true\" could cause extra Stackdriver charge.\n# If metrics.backend-destination is not Stackdriver, this is ignored.\nmetrics.allow-stackdriver-custom-metrics: \"false\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "1023066e8b31ee17571f8cdc028b23d35fac537125ca30d32bd1ee099aef5a80"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-triggers",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "config-observability-triggers",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1756",
                "uid": "2cab97c1-cd30-456c-be16-102cc9220b37"
            }
        },
        {
            "apiVersion": "v1",
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "402c47aab3ed1e013152050abf0a5d65570f31103a0e9d86f373d71089a8c9ee"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-registry-cert",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1314",
                "uid": "ef969c76-e358-46cb-a762-a077c77799d4"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n#\n# spire-trust-domain specifies the SPIRE trust domain to use.\n# spire-trust-domain: \"example.org\"\n#\n# spire-socket-path specifies the SPIRE agent socket for SPIFFE workload API.\n# spire-socket-path: \"unix:///spiffe-workload-api/spire-agent.sock\"\n#\n# spire-server-addr specifies the SPIRE server address for workload/node registration.\n# spire-server-addr: \"spire-server.spire.svc.cluster.local:8081\"\n#\n# spire-node-alias-prefix specifies the SPIRE node alias prefix to use.\n# spire-node-alias-prefix: \"/tekton-node/\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "fe205083604aad356150883f5894cef44f4cba28d567db9483f9dea94f927751"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-spire",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1315",
                "uid": "6ba996ee-ff17-4c00-b8dd-c39ddae93aab"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n#\n# Enable sending traces to defined endpoint by setting this to true\nenabled: \"true\"\n#\n# API endpoint to send the traces to\n# (optional): The default value is given below\nendpoint: \"http://jaeger-collector.jaeger.svc.cluster.local:14268/api/traces\"\n# (optional) Name of the k8s secret which contains basic auth credentials\ncredentialsSecret: \"jaeger-creds\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "a720c9431089733364e3ac2ac0dab7aceba2822785ac004d2604354f8a61bd2b"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-tracing",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1316",
                "uid": "b767d109-dd46-49ae-bb8a-97eaa2246798"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "cap": "60s",
                "duration": "10s",
                "factor": "2.0",
                "jitter": "0.0",
                "steps": "5"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "ec253db9ff8021d7b772f60bb928c47cf98f7b2ae835a9483589dc2d6f0b0f9e"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "config-wait-exponential-backoff",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1317",
                "uid": "d055dd54-f509-4e98-8d55-106e44716fe9"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "await-sidecar-readiness": "true",
                "coschedule": "workspaces",
                "disable-creds-init": "false",
                "disable-inline-spec": "",
                "enable-api-fields": "beta",
                "enable-artifacts": "false",
                "enable-cel-in-whenexpression": "false",
                "enable-concise-resolver-syntax": "false",
                "enable-custom-tasks": "true",
                "enable-kubernetes-sidecar": "false",
                "enable-param-enum": "false",
                "enable-provenance-in-status": "true",
                "enable-step-actions": "true",
                "enable-tekton-oci-bundles": "false",
                "enable-wait-exponential-backoff": "false",
                "enforce-nonfalsifiability": "none",
                "keep-pod-on-cancel": "false",
                "max-result-size": "4096",
                "performance": "disable-ha: false\n",
                "require-git-ssh-secret-known-hosts": "false",
                "results-from": "termination-message",
                "running-in-environment-with-injected-sidecars": "true",
                "send-cloudevents-for-runs": "false",
                "set-security-context": "false",
                "set-security-context-read-only-root-filesystem": "false",
                "trusted-resources-verification-no-match-policy": "ignore"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "c3f586c7dd6da3c965566464d3204b3951c3ff885d8fbff0e3ff7fe68c49619b"
                },
                "creationTimestamp": "2026-03-18T18:23:39Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "feature-flags",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1305",
                "uid": "9fde7d86-a57f-4627-a068-562d771700c0"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "enable-api-fields": "stable",
                "labels-exclusion-pattern": ""
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "0e84cda2cdd031119b3c56d8ad19be276dc44ea96b5fb7c7352219278f84019f"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "feature-flags-triggers",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1750",
                "uid": "65f6d8c4-d6cf-4280-8808-3c0ddd8f3d36"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "api-token-secret-key": "",
                "api-token-secret-name": "",
                "api-token-secret-namespace": "default",
                "default-org": "",
                "default-revision": "main",
                "default-url": "https://github.com/tektoncd/catalog.git",
                "fetch-timeout": "1m",
                "scm-type": "github",
                "server-url": ""
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "7d36eae53169ad56797d40a2cd6f2c998bb5eeac6b2af788956afa89a5190745"
                },
                "creationTimestamp": "2026-03-18T18:23:41Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "git-resolver-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1328",
                "uid": "17d50fac-3780-49b2-b859-cb825cfdfc03"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "fetch-timeout": "1m"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "4776d8dd1a898b0adb69e841a7aa2547e3ce254301f3cbb3b0eaab58ea9912a2"
                },
                "creationTimestamp": "2026-03-18T18:23:41Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "http-resolver-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1329",
                "uid": "93245982-7761-4100-a07a-737f5cab8e7a"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "default-artifact-hub-pipeline-catalog": "tekton-catalog-pipelines",
                "default-artifact-hub-task-catalog": "tekton-catalog-tasks",
                "default-kind": "task",
                "default-tekton-hub-catalog": "Tekton",
                "default-type": "artifact"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "9ca003eb53ab88ea80bcf3acbd62d9761151fe06a04da1f43e90f61a76bd0d85"
                },
                "creationTimestamp": "2026-03-18T18:23:41Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "hubresolver-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1330",
                "uid": "56519068-840d-4522-ad12-dd0129199cc6"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:23:36Z",
                "name": "kube-root-ca.crt",
                "namespace": "tekton-pipelines",
                "resourceVersion": "1220",
                "uid": "be0b64a1-afbe-4a53-b8ee-382e19e4320e"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "version": "v1.10.0"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "774be698051024bc8b20dddf73bbeb06e889af24bbfae6043ba1673f1f07ceb1"
                },
                "creationTimestamp": "2026-03-18T18:23:39Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "pipelines-info",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1306",
                "uid": "b1219fb9-6263-4627-8262-f3a10a09d846"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "max-size": "1000",
                "ttl": "5m"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "4fb78a3bd0549ee64593acaf3fbf6c805e7982a0fedccf13756acbd492c3d8fa"
                },
                "creationTimestamp": "2026-03-18T18:23:41Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "resolver-cache-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1331",
                "uid": "2da6e9a1-2e05-4936-911e-d7fa7cf56f76"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "enable-bundles-resolver": "true",
                "enable-cluster-resolver": "true",
                "enable-git-resolver": "true",
                "enable-http-resolver": "true",
                "enable-hub-resolver": "true"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "e0eba9bf246d28644a485a359590c80071a91e38d9167c35b654ea638865bd1c"
                },
                "creationTimestamp": "2026-03-18T18:23:40Z",
                "labels": {
                    "app.kubernetes.io/component": "resolvers",
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-pipelines",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines"
                },
                "name": "resolvers-feature-flags",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1323",
                "uid": "1e74dc53-17ea-4ee7-aa5d-4d003dfa586c"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "c2a7df1a5a4f55b633d80136d97202e6265c33cbaea16efcc5ee3fa17c9336be"
                },
                "creationTimestamp": "2026-03-18T18:24:35Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-chains",
                    "operator.tekton.dev/operand-name": "tektoncd-chains"
                },
                "name": "tekton-chains-config-leader-election",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "chain-knk88",
                        "uid": "ec66fa87-5eeb-4c50-9492-98e66143cbee"
                    }
                ],
                "resourceVersion": "2060",
                "uid": "bd88a157-53b3-446b-83a4-45de45cd11da"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n#\n# metrics.backend-destination field specifies the system metrics destination.\n# It supports either prometheus (the default) or stackdriver.\n# Note: Using Stackdriver will incur additional charges.\n#\nmetrics.backend-destination: prometheus\n#\n# metrics.stackdriver-project-id field specifies the Stackdriver project ID. This\n# field is optional. When running on GCE, application default credentials will be\n# used and metrics will be sent to the cluster's project if this field is\n# not provided.\n#\nmetrics.stackdriver-project-id: \"\u003cyour stackdriver project id\u003e\"\n#\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed\n# to send metrics to Stackdriver using \"global\" resource type and custom\n# metric type. Setting this flag to \"true\" could cause extra Stackdriver\n# charge.  If metrics.backend-destination is not Stackdriver, this is\n# ignored.\n#\nmetrics.allow-stackdriver-custom-metrics: \"false\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "f0c1789bdca318ea5d7ff2b11b1444309c2f6eab5c16bcacaf3a822109d223b1"
                },
                "creationTimestamp": "2026-03-18T18:24:35Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-chains",
                    "operator.tekton.dev/operand-name": "tektoncd-chains"
                },
                "name": "tekton-chains-config-observability",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "chain-knk88",
                        "uid": "ec66fa87-5eeb-4c50-9492-98e66143cbee"
                    }
                ],
                "resourceVersion": "2067",
                "uid": "0ed11db5-eab5-4523-9bc7-3c426a467cd8"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "2fddf4b2f903f7292175ad035efe83de5f1a201d78b99f4682c1669850fb780a"
                },
                "creationTimestamp": "2026-03-18T18:23:42Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "operator.tekton.dev/operand-name": "tektoncd-pipelines",
                    "operator.tekton.dev/release": "devel"
                },
                "name": "tekton-operator-proxy-webhook-config-leader-election",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "pipeline-main-static-v7nmp",
                        "uid": "91f82231-cff3-4b1b-bbcf-47fc3d98b2b5"
                    }
                ],
                "resourceVersion": "1341",
                "uid": "5034ef11-dc21-484e-8050-ad167aee8ffd"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "config": "DB_USER=\nDB_PASSWORD=\nDB_HOST=tekton-results-postgres-service.tekton-pipelines.svc.cluster.local\nDB_PORT=5432\nDB_NAME=tekton-results\nDB_SSLMODE=disable\nDB_SSLROOTCERT=\nDB_ENABLE_AUTO_MIGRATION=true\nDB_MAX_IDLE_CONNECTIONS=10\nDB_MAX_OPEN_CONNECTIONS=10\nGRPC_WORKER_POOL=2\nK8S_QPS=5\nK8S_BURST=10\nPROFILING=false\nPROFILING_PORT=6060\nFEATURE_GATES='PartialResponse=true'\nSERVER_PORT=8080\nPROMETHEUS_PORT=9090\nPROMETHEUS_HISTOGRAM=false\nTLS_PATH=/etc/tls\nTLS_MIN_VERSION=\nTLS_CIPHER_SUITES=\nTLS_CURVE_PREFERENCES=\nAUTH_DISABLE=false\nAUTH_IMPERSONATE=true\nLOG_LEVEL=info\nSQL_LOG_LEVEL=warn\nLOGS_API=false\nLOGS_TYPE=File\nLOGS_BUFFER_SIZE=32768\nLOGS_PATH=/logs\nLOGS_TIMESTAMPS=false\nS3_BUCKET_NAME=\nS3_ENDPOINT=\nS3_HOSTNAME_IMMUTABLE=false\nS3_REGION=\nS3_ACCESS_KEY_ID=\nS3_SECRET_ACCESS_KEY=\nS3_MULTI_PART_SIZE=5242880\nGCS_BUCKET_NAME=\nSTORAGE_EMULATOR_HOST=\nCONVERTER_ENABLE=false\nCONVERTER_DB_LIMIT=50\nMAX_RETENTION=\nLOGGING_PLUGIN_PROXY_PATH=/api/logs/v1/application\nLOGGING_PLUGIN_API_URL=\nLOGGING_PLUGIN_TOKEN_PATH=/var/run/secrets/kubernetes.io/serviceaccount/token\nLOGGING_PLUGIN_NAMESPACE_KEY=kubernetes_namespace_name\nLOGGING_PLUGIN_CONTAINER_KEY=kubernetes.container_name\nLOGGING_PLUGIN_STATIC_LABELS='log_type=application'\nLOGGING_PLUGIN_CA_CERT=\nLOGGING_PLUGIN_QUERY_LIMIT=1700\nLOGGING_PLUGIN_TLS_VERIFICATION_DISABLE=false\nLOGGING_PLUGIN_FORWARDER_DELAY_DURATION=10\nLOGGING_PLUGIN_QUERY_PARAMS='direction=forward'\nLOGGING_PLUGIN_MULTIPART_REGEX=\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "f605aaed0a03346a939722c29e5200f2bb60995ace023680377e3a2aa662da3f"
                },
                "creationTimestamp": "2026-03-18T18:24:41Z",
                "labels": {
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "v0.18.0",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-api-config",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2180",
                "uid": "3308c5b4-ac13-4a37-9066-1811236bb039"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n# lease-duration is how long non-leaders will wait to try to acquire the\n# lock; 15 seconds is the value used by core kubernetes controllers.\nlease-duration: \"60s\"\n# renew-deadline is how long a leader will try to renew the lease before\n# giving up; 10 seconds is the value used by core kubernetes controllers.\nrenew-deadline: \"40s\"\n# retry-period is how long the leader election client waits between tries of\n# actions; 2 seconds is the value used by core kubernetes controllers.\nretry-period: \"10s\"\n# buckets is the number of buckets used to partition key space of each\n# Reconciler. If this number is M and the replica number of the controller\n# is N, the N replicas will compete for the M buckets. The owner of a\n# bucket will take care of the reconciling for the keys partitioned into\n# that bucket.\nbuckets: \"1\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "c8f22113dcb1fde0316965483b03b3c3bb6e80f8e3b2652664a6fd76ae95a567"
                },
                "creationTimestamp": "2026-03-18T18:24:41Z",
                "labels": {
                    "app.kubernetes.io/name": "tekton-results-leader-election",
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "v0.18.0",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-config-leader-election",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2181",
                "uid": "dd2c3171-fcc4-4961-8414-de4447072c1e"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "loglevel.watcher": "info",
                "zap-logger-config": "{\n  \"level\": \"info\",\n  \"development\": false,\n  \"outputPaths\": [\"stdout\"],\n  \"errorOutputPaths\": [\"stderr\"],\n  \"encoding\": \"json\",\n  \"encoderConfig\": {\n    \"timeKey\": \"time\",\n    \"levelKey\": \"level\",\n    \"nameKey\": \"logger\",\n    \"callerKey\": \"caller\",\n    \"messageKey\": \"msg\",\n    \"stacktraceKey\": \"stacktrace\",\n    \"lineEnding\": \"\",\n    \"levelEncoder\": \"\",\n    \"timeEncoder\": \"iso8601\",\n    \"durationEncoder\": \"string\",\n    \"callerEncoder\": \"\"\n  }\n}\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "677652d72ef1b2e3b5cb0e87b24d258ef775d549385402fea3352306a6f2c864"
                },
                "creationTimestamp": "2026-03-18T18:24:41Z",
                "labels": {
                    "app.kubernetes.io/name": "tekton-results-logging",
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "v0.18.0",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-config-logging",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2182",
                "uid": "891dadd3-9c9d-41f4-b75a-aac461879816"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "_example": "################################\n#                              #\n#    EXAMPLE CONFIGURATION     #\n#                              #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# metrics.backend-destination field specifies the system metrics destination.\n# It supports either prometheus (the default) or stackdriver.\n# Note: Using Stackdriver will incur additional charges.\nmetrics.backend-destination: prometheus\n\n# metrics.stackdriver-project-id field specifies the Stackdriver project ID. This\n# field is optional. When running on GCE, application default credentials will be\n# used and metrics will be sent to the cluster's project if this field is\n# not provided.\nmetrics.stackdriver-project-id: \"\u003cyour stackdriver project id\u003e\"\n\n# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed\n# to send metrics to Stackdriver using \"global\" resource type and custom\n# metric type. Setting this flag to \"true\" could cause extra Stackdriver\n# charge.  If metrics.backend-destination is not Stackdriver, this is\n# ignored.\nmetrics.allow-stackdriver-custom-metrics: \"false\"\nmetrics.taskrun.level: \"task\"\nmetrics.taskrun.duration-type: \"histogram\"\nmetrics.pipelinerun.level: \"pipeline\"\nmetrics.pipelinerun.duration-type: \"histogram\"\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "271e0e4da98d5875ff878c60df82c48759f0de19604afef81af4dd818c0f2458"
                },
                "creationTimestamp": "2026-03-18T18:24:41Z",
                "labels": {
                    "app.kubernetes.io/name": "tekton-results-observability",
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "v0.18.0",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-config-observability",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2184",
                "uid": "c50b1dee-ce81-4e56-977d-38f9d580f19a"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "defaultRetention": "30",
                "runAt": "5 5 * * 0"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "0f08b1895474df69d7e15bc2e507d9d6e942052e2243bd61826b39d1dcd74e78"
                },
                "creationTimestamp": "2026-03-18T18:24:41Z",
                "labels": {
                    "app.kubernetes.io/name": "tekton-results-retention-policy",
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "v0.18.0",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-config-results-retention-policy",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2188",
                "uid": "caa4afea-46ba-40d5-8d2b-4d966b252060"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "version": "v0.18.0"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "47a0e91e924c6dad606371c38b6134b8bd5e3c935a7cd31cffa8013f9b637a99"
                },
                "creationTimestamp": "2026-03-18T18:24:41Z",
                "labels": {
                    "app.kubernetes.io/name": "tekton-results-info",
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "v0.18.0",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-info",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2189",
                "uid": "b3f6553b-069f-4f3d-b16b-a71fc4fda096"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "POSTGRES_DB": "tekton-results"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "adb6b4a529e56515913ab98a239b5cb65ba15f9c5b61c313fcf177be2422fe4e"
                },
                "creationTimestamp": "2026-03-18T18:24:42Z",
                "labels": {
                    "app.kubernetes.io/name": "tekton-results-postgres",
                    "app.kubernetes.io/part-of": "tekton-results",
                    "app.kubernetes.io/version": "devel",
                    "operator.tekton.dev/operand-name": "tektoncd-results"
                },
                "name": "tekton-results-postgres",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "result-qmx4b",
                        "uid": "be92967a-1c06-43c7-a657-9afa4886885c"
                    }
                ],
                "resourceVersion": "2198",
                "uid": "502cbc82-1f13-420e-8d96-5ec82c13874c"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "version": "v0.35.0"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "operator.tekton.dev/last-applied-hash": "16b5e71ae0ad3689ccc162f6c88e56345efa6aa7425619d890785daa6306cdf0"
                },
                "creationTimestamp": "2026-03-18T18:24:10Z",
                "labels": {
                    "app.kubernetes.io/instance": "default",
                    "app.kubernetes.io/part-of": "tekton-triggers",
                    "operator.tekton.dev/operand-name": "tektoncd-triggers"
                },
                "name": "triggers-info",
                "namespace": "tekton-pipelines",
                "ownerReferences": [
                    {
                        "apiVersion": "operator.tekton.dev/v1alpha1",
                        "blockOwnerDeletion": true,
                        "controller": true,
                        "kind": "TektonInstallerSet",
                        "name": "trigger-main-static-4lpr4",
                        "uid": "bbda7af2-a1bd-484d-ba98-1c4fceac7b2a"
                    }
                ],
                "resourceVersion": "1751",
                "uid": "ba877919-c406-4fe4-af8c-1ef4011adc7e"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:26:58Z",
                "name": "kube-root-ca.crt",
                "namespace": "user-ns1",
                "resourceVersion": "3688",
                "uid": "49342013-d8e8-4c1a-9552-7adb616ce5a0"
            }
        },
        {
            "apiVersion": "v1",
            "data": {
                "ca.crt": "-----BEGIN CERTIFICATE-----\nMIIDBTCCAe2gAwIBAgIIIT+eC0LIPs0wDQYJKoZIhvcNAQELBQAwFTETMBEGA1UE\nAxMKa3ViZXJuZXRlczAeFw0yNjAzMTgxODE2MTBaFw0zNjAzMTUxODIxMTBaMBUx\nEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQDrNEiEYI21MemvZPfDiUb8raz26pHhwwOBSmctUB/GZJGC6vu0nU7rfRfU\np2CocKlVnWmBaXb/W1fC02Vjb/4yHX/gZnDUs4Fd9o5agnnpgtrlZBzLZGgZtHk8\nc+4MIr5NfEOMTD3ozRuMWZhAfs/C3EjaiUYc7E1Ah0dBxu7u+9Gw3DRGQOgC8iYP\nEMKGbiJ2f6HLuVuXgpzeJW597xSeq6UVvc2ue7MFKyiVovKJDcgPKsOoPhBgbqTX\n2flhjA9K2W8m3BtnQpesfsTTaXQbHZXYmPfs/JU63xQWC5c7a6XVdaJsyIJf+3FC\nuhFoERVM8L9aAg+PrWLmj+ZoONRVAgMBAAGjWTBXMA4GA1UdDwEB/wQEAwICpDAP\nBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQP6XxWmoVjkV/OYkkT2zW2T8zoRDAV\nBgNVHREEDjAMggprdWJlcm5ldGVzMA0GCSqGSIb3DQEBCwUAA4IBAQCa/csMGmaN\nsIQWjWrPCGM/U3LGiC7V0aiQJdFtGDALzHYPCRrNiaQpqh64OFqltbx+frByYFdC\nmBYGPDCFqXaIiYWTx6It9wodlLGDwx/N/qHhMWwkxfo4MhQSdDXtwJCeAB0ip2MS\n42SNHGyGqx2BfdnX0jUkdUJdA4IL2bZ2JIrYRIvDSuJBrnjciwEWU8BpY/VgVADb\nUzlY4QhqVWD1GBf/0qQUNFfh31YKliAOf1Kz3GRQGtIFFzm3MTsaWJuUiBpqa141\nConcQKMMom0XiDcJdHUpzjJTnSwbhhj6HurJbQOBPn1V6TQp0wtviMXgAJ59qFho\ncW0zSogHFV/Q\n-----END CERTIFICATE-----\n"
            },
            "kind": "ConfigMap",
            "metadata": {
                "annotations": {
                    "kubernetes.io/description": "Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters."
                },
                "creationTimestamp": "2026-03-18T18:26:58Z",
                "name": "kube-root-ca.crt",
                "namespace": "user-ns2",
                "resourceVersion": "3693",
                "uid": "224cb354-8b11-4e4e-b09d-7da6c1278eab"
            }
        }
    ],
    "kind": "List",
    "metadata": {
        "resourceVersion": ""
    }
}
