--- apiVersion: v1 items: - apiVersion: v1 data: _example: "################################\n# #\n# \ EXAMPLE CONFIGURATION #\n# #\n################################\n\n# This block is not actually functional configuration,\n# but serves to illustrate the available configuration\n# options and document them in a way that is accessible\n# to users that `kubectl edit` this config map.\n#\n# These sample configuration options may be copied out of\n# this example block and unindented to be in the data block\n# to actually change the configuration.\n\n# ====================================== EXPLAINERS CONFIGURATION ======================================\n# Example\nexplainers: |-\n {\n \"art\": {\n \"image\" : \"kserve/art-explainer\",\n \ \"defaultImageVersion\": \"latest\"\n }\n }\n# Art Explainer runtime configuration\n explainers: |-\n {\n # Art explainer runtime configuration\n \"art\": {\n # image contains the default Art explainer serving runtime image uri.\n \"image\" : \"kserve/art-explainer\",\n \ \n # defautltImageVersion contains the Art explainer serving runtime default image version.\n \"defaultImageVersion\": \"latest\"\n }\n \ }\n# ====================================== ISVC CONFIGURATION ======================================\n# Example - setting custom annotation \n inferenceService: |-\n {\n \"serviceAnnotationDisallowedList\": [\n \"my.custom.annotation/1\" \n ],\n \"serviceLabelDisallowedList\": [\n \"my.custom.label.1\" \n ]\n }\n# Example - setting custom annotation\ninferenceService: |-\n {\n # ServiceAnnotationDisallowedList is a list of annotations that are not allowed to be propagated to Knative \n \ # revisions, which prevents the reconciliation loop to be triggered if the annotations is \n # configured here are used.\n # Default values are:\n \ # \"autoscaling.knative.dev/min-scale\",\n # \"autoscaling.knative.dev/max-scale\",\n \ # \"internal.serving.kserve.io/storage-initializer-sourceuri\",\n # \ 
\"kubectl.kubernetes.io/last-applied-configuration\",\n # \"modelFormat\"\n \ # Any new value will be appended to the list.\n \"serviceAnnotationDisallowedList\": [\n \"my.custom.annotation/1\" \n ],\n # ServiceLabelDisallowedList is a list of labels that are not allowed to be propagated to Knative revisions\n \ # which prevents the reconciliation loop to be triggered if the labels is configured here are used.\n \"serviceLabelDisallowedList\": [\n \"my.custom.label.1\" \ \n ]\n } \n# Example - setting custom resource\ninferenceService: |-\n \ {\n \"resource\": {\n \"cpuLimit\": \"1\",\n \"memoryLimit\": \"2Gi\",\n \"cpuRequest\": \"1\",\n \"memoryRequest\": \"2Gi\"\n }\n \ }\n# Example - setting custom resource\ninferenceService: |-\n {\n # resource contains the default resource configuration for the inference service.\n # you can override this configuration by specifying the resources in the inference service yaml.\n # If you want to unbound the resource (limits and requests), you can set the value to null or \"\" \n # or just remove the specific field from the config.\n \"resource\": {\n # cpuLimit is the limits.cpu to set for the inference service.\n \"cpuLimit\": \"1\",\n\n # memoryLimit is the limits.memory to set for the inference service.\n \"memoryLimit\": \"2Gi\",\n\n # cpuRequest is the requests.cpu to set for the inference service.\n \"cpuRequest\": \"1\",\n\n # memoryRequest is the requests.memory to set for the inference service.\n \"memoryRequest\": \"2Gi\"\n }\n }\n# ====================================== MultiNode CONFIGURATION ======================================\n# Example \nmultiNode: |-\n {\n \"customGPUResourceTypeList\": [\n \"custom.com/gpu\"\n \ ]\n }\n# Example of multinode configuration\nmultiNode: |-\n { \n \ # CustomGPUResourceTypeList is a list of custom GPU resource types intended to identify the GPU type of a resource,\n # not to restrict the user from using a specific GPU type.\n # The MultiNode runtime pod will dynamically add GPU 
resources based on the registered GPU types.\n \"customGPUResourceTypeList\": [\n \"custom.com/gpu\"\n ]\n } \n # ====================================== OTelCollector CONFIGURATION ======================================\n # Example\n opentelemetryCollector: |-\n {\n # scrapeInterval is the interval at which the OpenTelemetry Collector will scrape the metrics.\n \"scrapeInterval\": \"5s\",\n # metricScalerEndpoint is the endpoint from which the KEDA's ScaledObject will scrape the metrics.\n \"metricScalerEndpoint\": \"keda-otel-scaler.keda.svc:4318\",\n \ # metricReceiverEndpoint is the endpoint from which the OpenTelemetry Collector will scrape the metrics.\n \"metricReceiverEndpoint\": \"keda-otel-scaler.keda.svc:4317\"\n \ }\n\n # ====================================== AUTOSCALER CONFIGURATION ======================================\n # Example\n autoscaler: |-\n {\n # scaleUpStabilizationWindowSeconds is the stabilization window in seconds for scale up.\n \"scaleUpStabilizationWindowSeconds\": \"0\",\n # scaleDownStabilizationWindowSeconds is the stabilization window in seconds for scale down.\n \"scaleDownStabilizationWindowSeconds\": \"300\"\n \ }\n \n # ====================================== STORAGE INITIALIZER CONFIGURATION ======================================\n # Example\n storageInitializer: |-\n \ {\n \"image\" : \"kserve/storage-initializer:latest\",\n \"memoryRequest\": \"100Mi\",\n \"memoryLimit\": \"1Gi\",\n \"cpuRequest\": \"100m\",\n \ \"cpuLimit\": \"1\",\n \"caBundleConfigMapName\": \"\",\n \"caBundleVolumeMountPath\": \"/etc/ssl/custom-certs\",\n \"enableModelcar\": false,\n \"cpuModelcar\": \"10m\",\n \"memoryModelcar\": \"15Mi\"\n }\n storageInitializer: |-\n \ {\n # image contains the default storage initializer image uri.\n \"image\" : \"kserve/storage-initializer:latest\",\n \n # memoryRequest is the requests.memory to set for the storage initializer init container.\n \"memoryRequest\": \"100Mi\",\n \n # memoryLimit is the limits.memory to 
set for the storage initializer init container.\n \"memoryLimit\": \"1Gi\",\n \n # cpuRequest is the requests.cpu to set for the storage initializer init container.\n \ \"cpuRequest\": \"100m\",\n \n # cpuLimit is the limits.cpu to set for the storage initializer init container.\n \"cpuLimit\": \"1\",\n \ \n # caBundleConfigMapName is the ConfigMap that will be copied to a user namespace for the storage initializer init container.\n \"caBundleConfigMapName\": \"\",\n\n # caBundleVolumeMountPath is the mount point for the configmap set by caBundleConfigMapName for the storage initializer init container.\n \"caBundleVolumeMountPath\": \"/etc/ssl/custom-certs\",\n\n # enableModelcar enabled allows you to directly access an OCI container image by\n # using a source URL with an \"oci://\" schema.\n \"enableModelcar\": false,\n\n # cpuModelcar is the cpu request and limit that is used for the passive modelcar container. It can be\n # set very low, but should be allowed by any Kubernetes LimitRange that might apply.\n \"cpuModelcar\": \"10m\",\n\n # memoryModelcar is the memory request and limit that is used for the passive modelcar container. It can be\n # set very low, but should be allowed by any Kubernetes LimitRange that might apply.\n \"memoryModelcar\": \"15Mi\",\n\n # uidModelcar is the UID under which the modelcar process and the main container is running.\n \ # Some Kubernetes clusters might require this to be root (0). 
If not set the user id is left untouched (default)\n \"uidModelcar\": 10\n }\n \n # ====================================== CREDENTIALS ======================================\n # Example\n credentials: |-\n {\n \"storageSpecSecretName\": \"storage-config\",\n \ \"storageSecretNameAnnotation\": \"serving.kserve.io/storageSecretName\",\n \ \"gcs\": {\n \"gcsCredentialFileName\": \"gcloud-application-credentials.json\"\n \ },\n \"s3\": {\n \"s3AccessKeyIDName\": \"AWS_ACCESS_KEY_ID\",\n \ \"s3SecretAccessKeyName\": \"AWS_SECRET_ACCESS_KEY\",\n \"s3Endpoint\": \"\",\n \"s3UseHttps\": \"\",\n \"s3Region\": \"\",\n \"s3VerifySSL\": \"\",\n \"s3UseVirtualBucket\": \"\",\n \"s3UseAccelerate\": \"\",\n \"s3UseAnonymousCredential\": \"\",\n \"s3CABundleConfigMap\": \"\",\n \"s3CABundle\": \"\"\n }\n }\n # This is a global configuration used for downloading models from the cloud storage.\n # You can override this configuration by specifying the annotations on service account or static secret.\n # https://kserve.github.io/website/master/modelserving/storage/s3/s3/\n # For a quick reference about AWS ENV variables:\n # AWS Cli: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html\n # Boto: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html#using-environment-variables\n #\n # The `s3AccessKeyIDName` and `s3SecretAccessKeyName` fields are only used from this configmap when static credentials (IAM User Access Key Secret)\n # are used as the authentication method for AWS S3.\n # The rest of the fields are used in both authentication methods (IAM Role for Service Account & IAM User Access Key Secret) if a non-empty value is provided.\n credentials: |-\n \ {\n # storageSpecSecretName contains the secret name which has the credentials for downloading the model.\n # This option is used when specifying the storage spec on isvc yaml.\n \"storageSpecSecretName\": \"storage-config\",\n\n \ # The annotation can be specified on isvc yaml to 
allow overriding with the secret name reference from the annotation value.\n # When using storageUri the order of the precedence is: secret name reference annotation > secret name references from service account\n # When using storageSpec the order of the precedence is: secret name reference annotation > storageSpecSecretName in configmap\n\n # Configuration for google cloud storage\n \"gcs\": {\n # gcsCredentialFileName specifies the filename of the gcs credential\n \ \"gcsCredentialFileName\": \"gcloud-application-credentials.json\"\n \ },\n \n # Configuration for aws s3 storage. This add the corresponding environmental variables to the storage initializer init container.\n # For more info on s3 storage see https://kserve.github.io/website/master/modelserving/storage/s3/s3/\n \ \"s3\": {\n # s3AccessKeyIDName specifies the s3 access key id name\n \"s3AccessKeyIDName\": \"AWS_ACCESS_KEY_ID\",\n \n # s3SecretAccessKeyName specifies the s3 secret access key name\n \"s3SecretAccessKeyName\": \"AWS_SECRET_ACCESS_KEY\",\n \n # s3Endpoint specifies the s3 endpoint\n \"s3Endpoint\": \"\",\n \n # s3UseHttps controls whether to use secure https or unsecure http to download models.\n \ # Allowed values are 0 and 1.\n \"s3UseHttps\": \"\",\n \n \ # s3Region specifies the region of the bucket.\n \"s3Region\": \"\",\n \n # s3VerifySSL controls whether to verify the tls/ssl certificate.\n \"s3VerifySSL\": \"\",\n \n # s3UseVirtualBucket configures whether it is a virtual bucket or not.\n \"s3UseVirtualBucket\": \"\",\n\n # s3UseAccelerate configures whether to use transfer acceleration.\n \ \"s3UseAccelerate\": \"\",\n \n # s3UseAnonymousCredential configures whether to use anonymous credentials to download the model or not.\n \ \"s3UseAnonymousCredential\": \"\",\n\n # s3CABundleConfigMap specifies the mounted CA bundle config map name.\n \"s3CABundleConfigMap\": \"\",\n\n # s3CABundle specifies the full path (mount path + file name) for the mounted config map data when used 
with a configured CA bundle config map.\n # s3CABundle specifies the path to a certificate bundle to use for HTTPS certificate validation when used absent of a configured CA bundle config map.\n \"s3CABundle\": \"\"\n }\n }\n \n # ====================================== INGRESS CONFIGURATION ======================================\n # Example\n ingress: |-\n { \n \"enableGatewayApi\": false,\n \"kserveIngressGateway\": \"kserve/kserve-ingress-gateway\",\n \"ingressGateway\" : \"knative-serving/knative-ingress-gateway\",\n \ \"localGateway\" : \"knative-serving/knative-local-gateway\",\n \"localGatewayService\" : \"knative-local-gateway.istio-system.svc.cluster.local\",\n \"ingressDomain\" \ : \"example.com\",\n \"additionalIngressDomains\": [\"additional-example.com\", \"additional-example-1.com\"],\n \"ingressClassName\" : \"istio\",\n \"domainTemplate\": \"{{ .Name }}-{{ .Namespace }}.{{ .IngressDomain }}\",\n \"urlScheme\": \"http\",\n \"disableIstioVirtualHost\": false,\n \"disableIngressCreation\": false\n }\n ingress: |-\n { \n # enableGatewayApi specifies whether to use Gateway API instead of Ingress to serve external traffic.\n \"enableGatewayApi\": false,\n\n # KServe implements [Gateway API](https://gateway-api.sigs.k8s.io/) to serve external traffic. 
\n # By default, KServe configures a default gateway to serve external traffic.\n # But, KServe can be configured to use a custom gateway by modifying this configuration.\n # The gateway should be specified in format /\n # NOTE: This configuration only applicable for raw deployment.\n \"kserveIngressGateway\": \"kserve/kserve-ingress-gateway\",\n \n # ingressGateway specifies the ingress gateway to serve external traffic.\n # The gateway should be specified in format /\n # NOTE: This configuration only applicable for serverless deployment with Istio configured as network layer.\n \ \"ingressGateway\" : \"knative-serving/knative-ingress-gateway\",\n \n \ # knativeLocalGatewayService specifies the hostname of the Knative's local gateway service.\n # The default KServe configurations are re-using the Istio local gateways for Knative. In this case, this\n # knativeLocalGatewayService field can be left unset. When unset, the value of \"localGatewayService\" will be used.\n # However, sometimes it may be better to have local gateways specifically for KServe (e.g. when enabling strict mTLS in Istio).\n # Under such setups where KServe is needed to have its own local gateways, the values of the \"localGateway\" and\n # \"localGatewayService\" should point to the KServe local gateways. 
Then, this knativeLocalGatewayService field\n \ # should point to the Knative's local gateway service.\n # NOTE: This configuration only applicable for serverless deployment with Istio configured as network layer.\n \"knativeLocalGatewayService\": \"\",\n \n # localGateway specifies the gateway which handles the network traffic within the cluster.\n # NOTE: This configuration only applicable for serverless deployment with Istio configured as network layer.\n \"localGateway\" : \"knative-serving/knative-local-gateway\",\n \n # localGatewayService specifies the hostname of the local gateway service.\n # NOTE: This configuration only applicable for serverless deployment with Istio configured as network layer.\n \ \"localGatewayService\" : \"knative-local-gateway.istio-system.svc.cluster.local\",\n \n # ingressDomain specifies the domain name which is used for creating the url.\n # If ingressDomain is empty then example.com is used as default domain.\n # NOTE: This configuration only applicable for raw deployment.\n \ \"ingressDomain\" : \"example.com\",\n\n # additionalIngressDomains specifies the additional domain names which are used for creating the url.\n \ \"additionalIngressDomains\": [\"additional-example.com\", \"additional-example-1.com\"]\n\n \ # ingressClassName specifies the ingress controller to use for ingress traffic.\n # This is optional and if omitted the default ingress in the cluster is used.\n # https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class\n \ # NOTE: This configuration only applicable for raw deployment.\n \"ingressClassName\" : \"istio\",\n \n # domainTemplate specifies the template for generating domain/url for each inference service by combining variable from:\n # Name of the inference service ( {{ .Name}} )\n # Namespace of the inference service ( {{ .Namespace }} )\n # Annotation of the inference service ( {{ .Annotations.key }} )\n # Label of the inference service ( {{ .Labels.key }} )\n # 
IngressDomain ( {{ .IngressDomain }} )\n # If domain template is empty the default template {{ .Name }}-{{ .Namespace }}.{{ .IngressDomain }} is used.\n # NOTE: This configuration only applicable for raw deployment.\n \ \"domainTemplate\": \"{{ .Name }}-{{ .Namespace }}.{{ .IngressDomain }}\",\n \n # urlScheme specifies the url scheme to use for inference service and inference graph.\n # If urlScheme is empty then by default http is used.\n \"urlScheme\": \"http\",\n \n # disableIstioVirtualHost controls whether to use istio as network layer.\n # By default istio is used as the network layer. When DisableIstioVirtualHost is true, KServe does not\n # create the top level virtual service thus Istio is no longer required for serverless mode.\n # By setting this field to true, user can use other networking layers supported by knative.\n # For more info https://github.com/kserve/kserve/pull/2380, https://kserve.github.io/website/master/admin/serverless/kourier_networking/.\n \ # NOTE: This configuration is only applicable to serverless deployment.\n \ \"disableIstioVirtualHost\": false,\n\n # disableIngressCreation controls whether to disable ingress creation for raw deployment mode.\n \"disableIngressCreation\": false,\n \n # pathTemplate specifies the template for generating path based url for each inference service.\n # The following variables can be used in the template for generating url.\n # Name of the inference service ( {{ .Name}} )\n # Namespace of the inference service ( {{ .Namespace }} )\n # For more info https://github.com/kserve/kserve/issues/2257.\n \ # NOTE: This configuration only applicable to serverless deployment.\n \ \"pathTemplate\": \"/serving/{{ .Namespace }}/{{ .Name }}\"\n }\n \n # ====================================== LOGGER CONFIGURATION ======================================\n # Example\n logger: |-\n {\n \"image\" : \"kserve/agent:latest\",\n \ \"memoryRequest\": \"100Mi\",\n \"memoryLimit\": \"1Gi\",\n \"cpuRequest\": \"100m\",\n 
\"cpuLimit\": \"1\",\n \"defaultUrl\": \"http://default-broker\"\n \ }\n logger: |-\n {\n # image contains the default logger image uri.\n \ \"image\" : \"kserve/agent:latest\",\n \n # memoryRequest is the requests.memory to set for the logger container.\n \"memoryRequest\": \"100Mi\",\n \n # memoryLimit is the limits.memory to set for the logger container.\n \"memoryLimit\": \"1Gi\",\n \n # cpuRequest is the requests.cpu to set for the logger container.\n \"cpuRequest\": \"100m\",\n \n # cpuLimit is the limits.cpu to set for the logger container.\n \"cpuLimit\": \"1\",\n \n # defaultUrl specifies the default logger url. If logger is not specified in the resource this url is used.\n \"defaultUrl\": \"http://default-broker\"\n }\n \n # ====================================== BATCHER CONFIGURATION ======================================\n # Example\n batcher: |-\n {\n \"image\" : \"kserve/agent:latest\",\n \"memoryRequest\": \"1Gi\",\n \"memoryLimit\": \"1Gi\",\n \"cpuRequest\": \"1\",\n \ \"cpuLimit\": \"1\",\n \"maxBatchSize\": \"32\",\n \"maxLatency\": \"5000\"\n }\n batcher: |-\n {\n # image contains the default batcher image uri.\n \"image\" : \"kserve/agent:latest\",\n \n # memoryRequest is the requests.memory to set for the batcher container.\n \"memoryRequest\": \"1Gi\",\n \n # memoryLimit is the limits.memory to set for the batcher container.\n \"memoryLimit\": \"1Gi\",\n \n # cpuRequest is the requests.cpu to set for the batcher container.\n \"cpuRequest\": \"1\",\n \ \n # cpuLimit is the limits.cpu to set for the batcher container.\n \ \"cpuLimit\": \"1\"\n\n # maxBatchSize is the default maximum batch size for batcher.\n \"maxBatchSize\": \"32\",\n\n # maxLatency is the default maximum latency in milliseconds for batcher to wait and collect the batch.\n \"maxLatency\": \"5000\"\n }\n \n # ====================================== AGENT CONFIGURATION ======================================\n # Example\n agent: |-\n {\n \"image\" : \"kserve/agent:latest\",\n 
\"memoryRequest\": \"100Mi\",\n \"memoryLimit\": \"1Gi\",\n \"cpuRequest\": \"100m\",\n \ \"cpuLimit\": \"1\"\n }\n agent: |-\n {\n # image contains the default agent image uri.\n \"image\" : \"kserve/agent:latest\",\n \ \n # memoryRequest is the requests.memory to set for the agent container.\n \ \"memoryRequest\": \"100Mi\",\n \n # memoryLimit is the limits.memory to set for the agent container.\n \"memoryLimit\": \"1Gi\",\n \n \ # cpuRequest is the requests.cpu to set for the agent container.\n \"cpuRequest\": \"100m\",\n \n # cpuLimit is the limits.cpu to set for the agent container.\n \"cpuLimit\": \"1\"\n }\n \n # ====================================== ROUTER CONFIGURATION ======================================\n # Example\n router: |-\n {\n \"image\" : \"kserve/router:latest\",\n \"memoryRequest\": \"100Mi\",\n \"memoryLimit\": \"1Gi\",\n \"cpuRequest\": \"100m\",\n \ \"cpuLimit\": \"1\",\n \"headers\": {\n \"propagate\": []\n },\n \"imagePullPolicy\": \"IfNotPresent\",\n \"imagePullSecrets\": [\"docker-secret\"]\n }\n # router is the implementation of inference graph.\n router: |-\n {\n # image contains the default router image uri.\n \"image\" : \"kserve/router:latest\",\n \n # memoryRequest is the requests.memory to set for the router container.\n \"memoryRequest\": \"100Mi\",\n \n \ # memoryLimit is the limits.memory to set for the router container.\n \ \"memoryLimit\": \"1Gi\",\n \n # cpuRequest is the requests.cpu to set for the router container.\n \"cpuRequest\": \"100m\",\n \n \ # cpuLimit is the limits.cpu to set for the router container.\n \"cpuLimit\": \"1\",\n \n # Propagate the specified headers to all the steps specified in an InferenceGraph. 
\n # You can either specify the exact header names or use [Golang supported regex patterns]\n # (https://pkg.go.dev/regexp/syntax@go1.21.3#hdr-Syntax) to propagate multiple headers.\n \"headers\": {\n \"propagate\": [\n \"Authorization\",\n \"Test-Header-*\",\n \"*Trace-Id*\"\n \ ]\n }\n\n # imagePullPolicy specifies when the router image should be pulled from registry.\n \"imagePullPolicy\": \"IfNotPresent\",\n \ \n # # imagePullSecrets specifies the list of secrets to be used for pulling the router image from registry.\n # https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n \ \"imagePullSecrets\": [\"docker-secret\"]\n }\n \n# ====================================== DEPLOYMENT CONFIGURATION ======================================\n# Example\ndeploy: |-\n {\n \"defaultDeploymentMode\": \"Serverless\",\n \"deploymentRolloutStrategy\": {\n \"defaultRollout\": {\n \"maxSurge\": \"1\",\n \"maxUnavailable\": \"1\"\n }\n }\n }\n\ndeploy: |-\n {\n # defaultDeploymentMode specifies the default deployment mode of the kserve. The supported values are\n # Standard and Knative. 
Users can override the deployment mode at service level\n # by adding the annotation serving.kserve.io/deploymentMode.\n # \"defaultDeploymentMode\": \"Standard\",\n # deploymentRolloutStrategy specifies the default rollout strategy for the Standard deployment mode\n # \"deploymentRolloutStrategy\": {\n # defaultRollout specifies the default rollout configuration using Kubernetes deployment strategy\n # \"defaultRollout\": {\n # maxSurge specifies the maximum number of pods that can be created above the desired replica count\n # Can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%)\n # \"maxSurge\": \"1\",\n # maxUnavailable specifies the maximum number of pods that can be unavailable during the update\n # Can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%)\n \ # \"maxUnavailable\": \"1\"\n # }\n # }\n }\n\n # ====================================== SERVICE CONFIGURATION ======================================\n # Example\n service: |-\n {\n \"serviceClusterIPNone\": false\n }\n service: |-\n {\n \ # ServiceClusterIPNone is a boolean flag to indicate if the service should have a clusterIP set to None.\n # If the DeploymentMode is Raw, the default value for ServiceClusterIPNone if not set is false\n # \"serviceClusterIPNone\": \ false\n }\n\n # ====================================== METRICS CONFIGURATION ======================================\n # Example\n metricsAggregator: |-\n \ {\n \"enableMetricAggregation\": \"false\",\n \"enablePrometheusScraping\" : \"false\"\n }\n # For more info see https://github.com/kserve/kserve/blob/master/qpext/README.md\n metricsAggregator: |-\n {\n # enableMetricAggregation configures metric aggregation annotation. This adds the annotation serving.kserve.io/enable-metric-aggregation to every\n # service with the specified boolean value. 
If true enables metric aggregation in queue-proxy by setting env vars in the queue proxy container\n \ # to configure scraping ports.\n \"enableMetricAggregation\": \"false\",\n \ \n # enablePrometheusScraping configures metric aggregation annotation. This adds the annotation serving.kserve.io/enable-metric-aggregation to every\n \ # service with the specified boolean value. If true, prometheus annotations are added to the pod. If serving.kserve.io/enable-metric-aggregation is false,\n \ # the prometheus port is set with the default prometheus scraping port 9090, otherwise the prometheus port annotation is set with the metric aggregation port.\n \"enablePrometheusScraping\" : \"false\"\n }\n \n # ====================================== LOCALMODEL CONFIGURATION ======================================\n # Example\n localModel: |-\n {\n \"enabled\": false,\n # jobNamespace specifies the namespace where the download job will be created.\n \"jobNamespace\": \"kserve-localmodel-jobs\",\n # defaultJobImage specifies the default image used for the download job.\n \"defaultJobImage\" : \"kserve/storage-initializer:latest\",\n \ # Kubernetes modifies the filesystem group ID on the attached volume.\n \ \"fsGroup\": 1000,\n # TTL for the download job after it is finished.\n \ \"jobTTLSecondsAfterFinished\": 3600,\n # The frequency at which the local model agent reconciles the local models\n # This is to detect if models are missing from local disk\n \"reconcilationFrequencyInSecs\": 60,\n # This is to disable localmodel pv and pvc management for namespaces without isvcs\n \ \"disableVolumeManagement\": false\n }" agent: |- { "cpuLimit": "1", "cpuRequest": "100m", "image": "quay.io/opendatahub/kserve-agent@sha256:67a41ae3021ba2e5287597cf17a0636fe64b6e922a6ed1343bc133b720f5df94", "memoryLimit": "1Gi", "memoryRequest": "100Mi" } autoscaler: |- { "scaleUpStabilizationWindowSeconds": "0", "scaleDownStabilizationWindowSeconds": "300" } autoscaling-wva-controller-config: |- { 
"prometheus": { "url": "https://thanos-querier.openshift-monitoring.svc.cluster.local:9091", "authModes": "bearer", "triggerAuthName": "ai-inference-keda-thanos", "triggerAuthKind": "ClusterTriggerAuthentication" } } batcher: |- { "cpuLimit": "1", "cpuRequest": "1", "image": "quay.io/opendatahub/kserve-agent@sha256:67a41ae3021ba2e5287597cf17a0636fe64b6e922a6ed1343bc133b720f5df94", "memoryLimit": "1Gi", "memoryRequest": "1Gi" } credentials: |- { "storageSpecSecretName": "storage-config", "storageSecretNameAnnotation": "serving.kserve.io/storageSecretName", "gcs": { "gcsCredentialFileName": "gcloud-application-credentials.json" }, "s3": { "s3AccessKeyIDName": "AWS_ACCESS_KEY_ID", "s3SecretAccessKeyName": "AWS_SECRET_ACCESS_KEY", "s3Endpoint": "", "s3UseHttps": "", "s3Region": "", "s3VerifySSL": "", "s3UseVirtualBucket": "", "s3UseAccelerate": "", "s3UseAnonymousCredential": "", "s3CABundleConfigMap": "odh-kserve-custom-ca-bundle", "s3CABundle": "/etc/ssl/custom-certs/cabundle.crt" } } deploy: |- { "defaultDeploymentMode": "RawDeployment" } explainers: '{}' inferenceService: |- { "serviceAnnotationDisallowedList": [ "autoscaling.knative.dev/min-scale", "autoscaling.knative.dev/max-scale", "internal.serving.kserve.io/storage-initializer-sourceuri", "kubectl.kubernetes.io/last-applied-configuration", "security.opendatahub.io/enable-auth", "networking.knative.dev/visibility", "haproxy.router.openshift.io/timeout", "opendatahub.io/hardware-profile-name", "opendatahub.io/hardware-profile-namespace" ] } ingress: |- { "enableGatewayApi": false, "kserveIngressGateway": "openshift-ingress/openshift-ai-inference", "enableLLMInferenceServiceTLS": true, "ingressGateway": "knative-serving/knative-ingress-gateway", "knativeLocalGatewayService": "knative-local-gateway.istio-system.svc.cluster.local", "ingressService": "istio-ingressgateway.istio-system.svc.cluster.local", "localGateway": "istio-system/kserve-local-gateway", "localGatewayService": 
"kserve-local-gateway.istio-system.svc.cluster.local", "ingressDomain": "apps.5a3fa526-5adb-41fe-b310-55152ddd2118.prod.konfluxeaas.com", "ingressClassName": "openshift-default", "domainTemplate": "{{ .Name }}-{{ .Namespace }}.{{ .IngressDomain }}", "urlScheme": "http", "disableIstioVirtualHost": false, "disableIngressCreation": true } localModel: |- { "enabled": false, "jobNamespace": "opendatahub", "defaultJobImage" : "REPLACE_IMAGE", "fsGroup": 1000, "localModelAgentImage": "REPLACE_IMAGE", "localModelAgentCpuRequest": "100m", "localModelAgentMemoryRequest": "200Mi", "localModelAgentCpuLimit": "100m", "localModelAgentMemoryLimit": "300Mi" } logger: |- { "cpuLimit": "1", "cpuRequest": "100m", "defaultUrl": "http://default-broker", "image": "quay.io/opendatahub/kserve-agent@sha256:67a41ae3021ba2e5287597cf17a0636fe64b6e922a6ed1343bc133b720f5df94", "memoryLimit": "1Gi", "memoryRequest": "100Mi" } metricsAggregator: |- { "enableMetricAggregation": "false", "enablePrometheusScraping" : "false" } oauthProxy: |- { "cpuLimit": "200m", "cpuRequest": "100m", "image": "quay.io/opendatahub/odh-kube-auth-proxy@sha256:dcb09fbabd8811f0956ef612a0c9ddd5236804b9bd6548a0647d2b531c9d01b3", "memoryLimit": "128Mi", "memoryRequest": "64Mi" } openshiftConfig: |- { "modelcachePermissionFixImage": "REPLACE_IMAGE" } opentelemetryCollector: |- { "scrapeInterval": "5s", "metricReceiverEndpoint": "keda-otel-scaler.keda.svc:4317", "metricScalerEndpoint": "keda-otel-scaler.keda.svc:4318", "resource": { "cpuLimit": "1", "memoryLimit": "2Gi", "cpuRequest": "200m", "memoryRequest": "512Mi" } } router: |- { "cpuLimit": "1", "cpuRequest": "100m", "headers": { "propagate": [ "Authorization" ] }, "image": "quay.io/opendatahub/kserve-router@sha256:fa769602a23f523b573db8418e88e5bc10cc9d997f9a762a0e4929807e15e82b", "memoryLimit": "1Gi", "memoryRequest": "100Mi" } security: |- { "autoMountServiceAccountToken": false } service: |- { "serviceClusterIPNone": false } storageInitializer: |- { "cpuLimit": "1", 
"cpuModelcar": "10m", "cpuRequest": "100m", "enableModelcar": true, "image": "quay.io/opendatahub/kserve-storage-initializer@sha256:9cb363f3a01ee53202c49d182f025a6cf697ac6f3bd0a3c8aef7b04a0a53d808", "memoryLimit": "24Gi", "memoryModelcar": "15Mi", "memoryRequest": "100Mi" } kind: ConfigMap metadata: creationTimestamp: "2026-04-16T16:35:19Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: f:_example: {} f:agent: {} f:autoscaler: {} f:autoscaling-wva-controller-config: {} f:batcher: {} f:credentials: {} f:deploy: {} f:explainers: {} f:inferenceService: {} f:localModel: {} f:logger: {} f:metricsAggregator: {} f:oauthProxy: {} f:openshiftConfig: {} f:opentelemetryCollector: {} f:router: {} f:security: {} f:service: {} f:storageInitializer: {} manager: kubectl operation: Apply time: "2026-04-16T16:35:19Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: f:ingress: {} manager: kubectl-patch operation: Update time: "2026-04-16T16:35:57Z" name: inferenceservice-config namespace: kserve resourceVersion: "12438" uid: 5f797dc6-4837-454a-bf40-9cf92f0ba5e0 - apiVersion: v1 data: kserve-agent: quay.io/opendatahub/kserve-agent@sha256:67a41ae3021ba2e5287597cf17a0636fe64b6e922a6ed1343bc133b720f5df94 kserve-controller: quay.io/opendatahub/kserve-controller@sha256:66d40a3c02e823fdf2e80ebe891997a16737ed52b01d8297dd4679751750d6d9 kserve-llm-d: registry.redhat.io/rhaiis/vllm-cuda-rhel9@sha256:fc68d623d1bfc36c8cb2fe4a71f19c8578cfb420ce8ce07b20a02c1ee0be0cf3 kserve-llm-d-amd-rocm: registry.redhat.io/rhaiis/vllm-rocm-rhel9@sha256:d9a48add238cc095fa43eeee17c8c4d104de60c4dc623e0bc7f8c4b53b2b2e97 kserve-llm-d-ibm-spyre: registry.redhat.io/rhaiis/vllm-spyre-rhel9@sha256:80ae3e435a5be2c1f117f36599103ab05357917dd6e37f0df6613cb3ac2c13ea kserve-llm-d-inference-scheduler: quay.io/opendatahub/llm-d-inference-scheduler:odh-stable kserve-llm-d-intel-gaudi: registry.redhat.io/rhaii-early-access/vllm-gaudi-rhel9:3.4.0-ea.2 kserve-llm-d-nvidia-cuda: 
registry.redhat.io/rhaiis/vllm-cuda-rhel9@sha256:fc68d623d1bfc36c8cb2fe4a71f19c8578cfb420ce8ce07b20a02c1ee0be0cf3 kserve-llm-d-routing-sidecar: quay.io/opendatahub/llm-d-routing-sidecar:odh-stable kserve-llm-d-uds-tokenizer: quay.io/opendatahub/llm-d-kv-cache:v0.6.0 kserve-localmodel-controller: quay.io/opendatahub/kserve-localmodel-controller:latest kserve-localmodelnode-agent: quay.io/opendatahub/kserve-localmodelnode-agent:latest kserve-router: quay.io/opendatahub/kserve-router@sha256:fa769602a23f523b573db8418e88e5bc10cc9d997f9a762a0e4929807e15e82b kserve-storage-initializer: quay.io/opendatahub/kserve-storage-initializer@sha256:9cb363f3a01ee53202c49d182f025a6cf697ac6f3bd0a3c8aef7b04a0a53d808 kube-rbac-proxy: quay.io/opendatahub/odh-kube-auth-proxy@sha256:dcb09fbabd8811f0956ef612a0c9ddd5236804b9bd6548a0647d2b531c9d01b3 llmisvc-controller: ghcr.io/opendatahub-io/kserve/odh-kserve-llmisvc-controller:release-v0.17 kind: ConfigMap metadata: creationTimestamp: "2026-04-16T16:35:19Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: f:kserve-agent: {} f:kserve-controller: {} f:kserve-llm-d: {} f:kserve-llm-d-amd-rocm: {} f:kserve-llm-d-ibm-spyre: {} f:kserve-llm-d-inference-scheduler: {} f:kserve-llm-d-intel-gaudi: {} f:kserve-llm-d-nvidia-cuda: {} f:kserve-llm-d-routing-sidecar: {} f:kserve-llm-d-uds-tokenizer: {} f:kserve-localmodel-controller: {} f:kserve-localmodelnode-agent: {} f:kserve-router: {} f:kserve-storage-initializer: {} f:kube-rbac-proxy: {} f:llmisvc-controller: {} manager: kubectl operation: Apply time: "2026-04-16T16:35:19Z" name: kserve-parameters namespace: kserve resourceVersion: "11979" uid: 84232bdf-1f69-42a9-98fd-0369a644ca85 - apiVersion: v1 data: ca.crt: | -----BEGIN CERTIFICATE----- MIIDPDCCAiSgAwIBAgIIQoq22UvtqoMwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTI2MDQxNjE2MjMzMVoX DTM2MDQxMzE2MjMzMVowJjESMBAGA1UECxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdy 
b290LWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAurduFzedxNin NKX60j4LEy2oYe03FHYzOXhZ13qtrTGRXPvzjK6ZisEd3NU9lV862vjEF+9TqrDd WzKFKNd7+jzbnRC1m/bueL2IIlzmIvehE6Lquwm4dGJs2Wk0ktqQWeMHg6HSy8Ti 1LHH1tJjX4kajayMgo3cQr8fQeGwHR2m5WhmB0JJL42F8q+02Dsyn4OGi8vKmTUa jxGLc9shQI5NZCxH7EgIIMmV9Sn9JbQLU9RVuKxfOA1w5QGCx7kti9F97tOQc11S aXgowY4Urjgha/Hzw67uTYih5OqXpQeitk3Rf9GGqpPNJY0tk6BRIn661niN0+T5 JvjWSHz07wIDAQABo24wbDAOBgNVHQ8BAf8EBAMCAqQwDwYDVR0TAQH/BAUwAwEB /zBJBgNVHQ4EQgRAbNLpu9O52liPDf4KLCttW2yvKl8AyQOG5qiZfOMh7E5FCM3f FRwtdjOOQrkyewydVxKXFhBwvKtCTs8uuxzAtTANBgkqhkiG9w0BAQsFAAOCAQEA nWpZ4YuFM/Eo8olRDcDBJnMSkDQZ4czeWL18CZ+5irYb4Avewdtlr0eIiUE+Ilaq j4tVE48SzvyAws6VfK6I1fWirEkj+mghDfOQQVAlnz+MzlFGdlISOrAs4wP51W3R ZAU9cbD6bldvSr+CICzSieCoUnEkjVw+Mo4OKBrKJ0USELPB4WZkaHmJD1zrVMQ3 DCLKsQB3Kc0GbujfUkuR7zSqdVb08ULBzfIYKdRIVQW7GXIME0UQvG3kTrnFr7E+ IOo+TPAjpxSsykOrPhDccSG9K2i5PLMXHgdPXJpO3ehLgdAN7QBkBoIMEGEfcbPK PAUDTGrFZrA4mE6kXTGkig== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- MIIEADCCAuigAwIBAgIIZZlpTltcvOwwDQYJKoZIhvcNAQELBQAwJjESMBAGA1UE CxMJb3BlbnNoaWZ0MRAwDgYDVQQDEwdyb290LWNhMB4XDTI2MDQxNjE2MjQyNloX DTI3MDQxNjE2MjQyNlowMDESMBAGA1UEChMJb3BlbnNoaWZ0MRowGAYDVQQDExFv cGVuc2hpZnQtaW5ncmVzczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB ANiqli80o/vIfYmNJdxTA1LWhOq5ONKq+HU+alGGbWlH5PyeAKNo5zHmc4SXmupO t5MWThy5mLcJYbBxqj8TJ44e3W+WAr6FU3huRjcquY3MJamD3r0Lox3jXkcHCCox hSHB4q5Gc93CJ63Vdgl+3Gumn4454OSMf5reUOkNlrtpGcVKiv4MR42q/A8D0vlE PP+JcMrfq8HgMHhP9SbLXjWHvBlStNquNztMeYmEdnKMKXKAFq0+WYmqUrZkKkoT 33cT7jHWR8fVE0eu3gUzlHVsdnsSZMQjwEZmZd40dbJg3qN+cg8QFRDLJTaAIr4N WxQJu++UZi9rgUwznIbooKECAwEAAaOCASYwggEiMA4GA1UdDwEB/wQEAwIFoDAd BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADBJBgNV HQ4EQgRAY+Udir4UPQk7WCFHpLhgyhgaPrE2IchHRWjTReUC7LonWaGpSu/HznrZ QxjSXc4JCy1Xgc/o4MMgowru8ZwORzBLBgNVHSMERDBCgEBs0um707naWI8N/gos K21bbK8qXwDJA4bmqJl84yHsTkUIzd8VHC12M45CuTJ7DJ1XEpcWEHC8q0JOzy67 HMC1MEsGA1UdEQREMEKCQCouYXBwcy41YTNmYTUyNi01YWRiLTQxZmUtYjMxMC01 
NTE1MmRkZDIxMTgucHJvZC5rb25mbHV4ZWFhcy5jb20wDQYJKoZIhvcNAQELBQAD ggEBAHlyPcrRnkGjLGSam6Aegha7McCj6UY1v1fwQ9qoiGDZVSCFTBB+m6SXbUrq my9vRPzUw+eZ2H46lD+spsR91GYVslc6Qk5uV2O5eh9kkAD6pdo6XmMuQ1X49TPs /LAaS3s2wtK2Az9MgA4sF5QQxplQ1ZMlGLWAljEAAcNNilQiyqKJ3FWVPCDnQolt 9GacIuz8h2KeBpcpER0LRmQhf8pMLMse+RH26100ainSBZ9oq5wgPYPIWtWKlUSr QbU3+dBQeqUp9XNoa09Y6VvgDekBVdKKfsigb0Cu2oj483kCyMvHfvqbaeUOkwVO AD8PkIBv2R7ahYhBpXD2OQCuayM= -----END CERTIFICATE----- kind: ConfigMap metadata: annotations: kubernetes.io/description: Contains a CA bundle that can be used to verify the kube-apiserver when using internal endpoints such as the internal service IP or kubernetes.default.svc. No other usage is guaranteed across distributions of Kubernetes clusters. creationTimestamp: "2026-04-16T16:35:06Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:ca.crt: {} f:metadata: f:annotations: .: {} f:kubernetes.io/description: {} manager: kube-controller-manager operation: Update time: "2026-04-16T16:35:06Z" name: kube-root-ca.crt namespace: kserve resourceVersion: "11748" uid: 2877d1d6-3e7e-4e87-85d4-081567766b24 - apiVersion: v1 data: cabundle.crt: |- -----BEGIN CERTIFICATE----- MIIDUTCCAjmgAwIBAgIIXZYbLr7mLBAwDQYJKoZIhvcNAQELBQAwNjE0MDIGA1UE Awwrb3BlbnNoaWZ0LXNlcnZpY2Utc2VydmluZy1zaWduZXJAMTc3NjM1Njk5NzAe Fw0yNjA0MTYxNjI5NTdaFw0yODA2MTQxNjI5NThaMDYxNDAyBgNVBAMMK29wZW5z aGlmdC1zZXJ2aWNlLXNlcnZpbmctc2lnbmVyQDE3NzYzNTY5OTcwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNQyJTe4kJVl61V9A6tBET174XYF6i6gQ 4pgNTzSC+atdS0hij/RRPAy6f/Y3/Sq8g3mLJfUfYLPKZEa458bIGiEOutHdabnF rL3hrwJLT81DV99pyPr7u+BNsQfzLfdshiSB0JhOg/ytxGvq54b7yUHZLXmGT+8y /BKuf3djrjmd9uHopBpXsOvrY3knDP97dMvhPjnqj3zXhiGhhJEeeEqipC+dbGPZ kWBJmdN/5DiA186IZP+r9ItrkRIB0dEpU0wXiFa/0j9SR0WZIrJQ5XBpsshDdIE4 cp7+smgMNStsxZldxgXtRTXsO+nPtqBIA+CFkDu1gc80oyRgjXPDAgMBAAGjYzBh MA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSfPy3u C+4X4A+u/YXtCS97Vm4BADAfBgNVHSMEGDAWgBSfPy3uC+4X4A+u/YXtCS97Vm4B 
ADANBgkqhkiG9w0BAQsFAAOCAQEAfiGqXycJ4m5+umuY5+nlJvVJuLB6DKzY0vEW feY11DR2z27E0+ONPw/p5NUa6+MWEDxWjgXZ08G17yzI8qPNzBg0opH5x2yHoif7 sAt6bCTontfy8fZbhKvSVOKkrL4Ecw51WTkK+4g1X5cv4FwpDrlZZVYk0lRxw3FS pJGKkgHMQ6mhVwpaRbwbMUnok0FPp7Qq77vZzrjg2PlLxzuDdrttjAeu5VtZZYEf iCtqQCB9VEx3ego3zSkl9Es2MP7FB8jEgOQYKBMYiVXxvYFn8SBgMHhxQ6wCUkOm udAwlyJZzk3MX8vpUDyO74L7SFXYgEaChKPRshohYW/eRYXMGw== -----END CERTIFICATE----- kind: ConfigMap metadata: creationTimestamp: "2026-04-16T16:36:36Z" labels: opendatahub.io/managed: "true" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:cabundle.crt: {} f:metadata: f:labels: .: {} f:opendatahub.io/managed: {} manager: manager operation: Update time: "2026-04-16T16:36:36Z" name: odh-kserve-custom-ca-bundle namespace: kserve resourceVersion: "12853" uid: 63cf706f-38e2-4542-b6d3-f51abea0972f - apiVersion: v1 data: guardrails-detector-huggingface-runtime-image: quay.io/trustyai/guardrails-detector-huggingface-runtime:latest kserve-state: managed mlserver-image: quay.io/opendatahub/mlserver:fast modelregistry-state: removed nim-state: managed odh-model-controller: quay.io/opendatahub/odh-model-controller:fast odh-model-serving-api: quay.io/opendatahub/odh-model-controller:odh-model-serving-api-fast ovms-image: quay.io/opendatahub/openvino_model_server:2025.1-release ray-tls-generator-image: registry.redhat.io/ubi9/ubi-minimal:latest tgis-image: quay.io/opendatahub/text-generation-inference:fast vllm-cpu-image: quay.io/vllm/vllm:latest vllm-cpu-x86-image: quay.io/vllm/vllm:latest vllm-cuda-image: quay.io/vllm/vllm-cuda:latest vllm-gaudi-image: quay.io/opendatahub/vllm:fast-gaudi vllm-rocm-image: quay.io/vllm/vllm-rocm:latest vllm-spyre-image: quay.io/vllm/vllm:latest kind: ConfigMap metadata: annotations: kubectl.kubernetes.io/last-applied-configuration: | 
{"apiVersion":"v1","data":{"guardrails-detector-huggingface-runtime-image":"quay.io/trustyai/guardrails-detector-huggingface-runtime:latest","kserve-state":"managed","mlserver-image":"quay.io/opendatahub/mlserver:fast","modelregistry-state":"removed","nim-state":"managed","odh-model-controller":"quay.io/opendatahub/odh-model-controller:fast","odh-model-serving-api":"quay.io/opendatahub/odh-model-controller:odh-model-serving-api-fast","ovms-image":"quay.io/opendatahub/openvino_model_server:2025.1-release","ray-tls-generator-image":"registry.redhat.io/ubi9/ubi-minimal:latest","tgis-image":"quay.io/opendatahub/text-generation-inference:fast","vllm-cpu-image":"quay.io/vllm/vllm:latest","vllm-cpu-x86-image":"quay.io/vllm/vllm:latest","vllm-cuda-image":"quay.io/vllm/vllm-cuda:latest","vllm-gaudi-image":"quay.io/opendatahub/vllm:fast-gaudi","vllm-rocm-image":"quay.io/vllm/vllm-rocm:latest","vllm-spyre-image":"quay.io/vllm/vllm:latest"},"kind":"ConfigMap","metadata":{"annotations":{},"name":"odh-model-controller-parameters","namespace":"kserve"}} creationTimestamp: "2026-04-16T16:36:31Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: .: {} f:guardrails-detector-huggingface-runtime-image: {} f:kserve-state: {} f:mlserver-image: {} f:modelregistry-state: {} f:nim-state: {} f:odh-model-controller: {} f:odh-model-serving-api: {} f:ovms-image: {} f:ray-tls-generator-image: {} f:tgis-image: {} f:vllm-cpu-image: {} f:vllm-cpu-x86-image: {} f:vllm-cuda-image: {} f:vllm-gaudi-image: {} f:vllm-rocm-image: {} f:vllm-spyre-image: {} f:metadata: f:annotations: .: {} f:kubectl.kubernetes.io/last-applied-configuration: {} manager: kubectl-client-side-apply operation: Update time: "2026-04-16T16:36:31Z" name: odh-model-controller-parameters namespace: kserve resourceVersion: "12711" uid: 41b6e637-a51b-470e-a0fa-58b26233e2f6 - apiVersion: v1 data: service-ca.crt: | -----BEGIN CERTIFICATE----- MIIDUTCCAjmgAwIBAgIIXZYbLr7mLBAwDQYJKoZIhvcNAQELBQAwNjE0MDIGA1UE 
Awwrb3BlbnNoaWZ0LXNlcnZpY2Utc2VydmluZy1zaWduZXJAMTc3NjM1Njk5NzAe Fw0yNjA0MTYxNjI5NTdaFw0yODA2MTQxNjI5NThaMDYxNDAyBgNVBAMMK29wZW5z aGlmdC1zZXJ2aWNlLXNlcnZpbmctc2lnbmVyQDE3NzYzNTY5OTcwggEiMA0GCSqG SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNQyJTe4kJVl61V9A6tBET174XYF6i6gQ 4pgNTzSC+atdS0hij/RRPAy6f/Y3/Sq8g3mLJfUfYLPKZEa458bIGiEOutHdabnF rL3hrwJLT81DV99pyPr7u+BNsQfzLfdshiSB0JhOg/ytxGvq54b7yUHZLXmGT+8y /BKuf3djrjmd9uHopBpXsOvrY3knDP97dMvhPjnqj3zXhiGhhJEeeEqipC+dbGPZ kWBJmdN/5DiA186IZP+r9ItrkRIB0dEpU0wXiFa/0j9SR0WZIrJQ5XBpsshDdIE4 cp7+smgMNStsxZldxgXtRTXsO+nPtqBIA+CFkDu1gc80oyRgjXPDAgMBAAGjYzBh MA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSfPy3u C+4X4A+u/YXtCS97Vm4BADAfBgNVHSMEGDAWgBSfPy3uC+4X4A+u/YXtCS97Vm4B ADANBgkqhkiG9w0BAQsFAAOCAQEAfiGqXycJ4m5+umuY5+nlJvVJuLB6DKzY0vEW feY11DR2z27E0+ONPw/p5NUa6+MWEDxWjgXZ08G17yzI8qPNzBg0opH5x2yHoif7 sAt6bCTontfy8fZbhKvSVOKkrL4Ecw51WTkK+4g1X5cv4FwpDrlZZVYk0lRxw3FS pJGKkgHMQ6mhVwpaRbwbMUnok0FPp7Qq77vZzrjg2PlLxzuDdrttjAeu5VtZZYEf iCtqQCB9VEx3ego3zSkl9Es2MP7FB8jEgOQYKBMYiVXxvYFn8SBgMHhxQ6wCUkOm udAwlyJZzk3MX8vpUDyO74L7SFXYgEaChKPRshohYW/eRYXMGw== -----END CERTIFICATE----- kind: ConfigMap metadata: annotations: service.beta.openshift.io/inject-cabundle: "true" creationTimestamp: "2026-04-16T16:35:06Z" managedFields: - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: {} f:metadata: f:annotations: .: {} f:service.beta.openshift.io/inject-cabundle: {} manager: kube-controller-manager operation: Update time: "2026-04-16T16:35:06Z" - apiVersion: v1 fieldsType: FieldsV1 fieldsV1: f:data: f:service-ca.crt: {} manager: service-ca-operator operation: Update time: "2026-04-16T16:35:06Z" name: openshift-service-ca.crt namespace: kserve resourceVersion: "11759" uid: 17e9141f-b492-4490-865e-1cc3b433a776 kind: ConfigMapList metadata: resourceVersion: "39598"