From 30622089df4ded85f319cf8ac4e63e397b9d9c27 Mon Sep 17 00:00:00 2001 From: Docs Deploy Date: Fri, 24 Jan 2025 16:03:38 +0000 Subject: [PATCH] Deployed 3fe63fbe to dev with MkDocs 1.6.1 and mike 2.1.3 --- .../design/architectural-overview/index.html | 1796 ----------- .../design/modular_installation/index.html | 1426 --------- dev/authorino-operator/index.html | 2133 -------------- dev/authorino/docs/code_of_conduct/index.html | 1347 --------- dev/authorino/index.html | 1819 ------------ ...drant.io_v1alpha1_dnshealthcheckprobe.yaml | 15 - .../kuadrant.io_v1alpha1_dnsrecord.yaml | 20 - .../config/samples/kustomization.yaml | 5 - dev/dns-operator/docs/RELEASE/index.html | 1487 ---------- .../dnsrecord-lifecycle/index.html | 1357 --------- .../docs/reference/dnsrecord/index.html | 1639 ----------- dev/dns-operator/index.html | 1667 ----------- dev/install-olm/index.html | 2 +- .../samples/kuadrant_v1_authpolicy.yaml | 18 - .../config/samples/kuadrant_v1_dnspolicy.yaml | 14 - .../samples/kuadrant_v1_ratelimitpolicy.yaml | 15 - .../config/samples/kuadrant_v1_tlspolicy.yaml | 14 - .../samples/kuadrant_v1beta1_kuadrant.yaml | 6 - .../config/samples/kustomization.yaml | 8 - .../doc/images/kuadrant-architecture.svg | 1 - .../doc/install/install-make/index.html | 1395 --------- .../doc/overviews/development/index.html | 1869 ------------ .../doc/overviews/logging/index.html | 1343 --------- .../images/authpolicy-control-structure.png | Bin 60035 -> 0 bytes .../rlp-target-gateway-resource/index.html | 1781 ----------- .../index.html | 1371 --------- .../dns/orphan-dns-records/index.html | 1449 --------- .../user-guides/misc/external-api/index.html | 1498 ---------- .../examples/alerts/index.html | 1462 --------- .../examples/alerts/kustomization.yaml | 8 - .../examples/alerts/orphan_records.yaml | 22 - .../prometheusrules_policies_missing.yaml | 57 - .../examples/alerts/slo-availability.yaml | 212 -- .../examples/alerts/slo-latency.yaml | 212 -- 
.../alerts/sloth/slo-availability.yaml | 31 - .../examples/alerts/sloth/slo-latency.yaml | 34 - .../alerts/tests/slo-availability-test.yaml | 47 - .../alerts/tests/slo-latency-test.yaml | 47 - .../examples/dashboards/app_developer.json | 1693 ----------- .../examples/dashboards/business_user.json | 768 ----- .../controller-resources-metrics.json | 339 --- .../controller-runtime-metrics.json | 776 ----- .../examples/dashboards/dns-operator.json | 622 ---- .../examples/dashboards/kustomization.yaml | 28 - .../dashboards/platform_engineer.json | 2616 ----------------- .../examples/dnspolicy/application.yaml | 50 - .../dnspolicy/aws-dns-provider-secret.yaml | 10 - .../dnspolicy/dnspolicy-bad-strategy.yaml | 12 - .../dnspolicy/dnspolicy-exclude-address.yaml | 20 - .../dnspolicy/dnspolicy-healthchecks.yaml | 21 - .../examples/dnspolicy/dnspolicy.yaml | 16 - .../examples/dnspolicy/gateway.yaml | 20 - .../examples/dnspolicy/script.sh | 27 - .../examples/external-api-istio.yaml | 94 - dev/kuadrant-operator/examples/metal-lb.yaml | 14 - .../examples/oas-apikey.yaml | 48 - dev/kuadrant-operator/examples/oas-oidc.yaml | 55 - .../examples/toystore/admin-key-secret.yaml | 15 - .../toystore/alice-api-key-secret.yaml | 14 - .../examples/toystore/authpolicy.yaml | 50 - .../toystore/authpolicy_jwt-k8s-authnz.yaml | 44 - .../examples/toystore/bob-api-key-secret.yaml | 14 - .../examples/toystore/httproute.yaml | 29 - .../examples/toystore/kuadrant.yaml | 6 - .../toystore/ratelimitpolicy_gateway.yaml | 24 - .../toystore/ratelimitpolicy_httproute.yaml | 41 - .../examples/toystore/toystore.yaml | 41 - dev/kuadrant-operator/index.html | 1386 --------- dev/kuadrantctl/doc/RELEASE/index.html | 1422 --------- dev/kuadrantctl/doc/development/index.html | 1385 --------- .../generate-gateway-api-httproute/index.html | 1402 --------- .../generate-kuadrant-auth-policy/index.html | 1863 ------------ .../index.html | 1570 ---------- .../apicurio-security-scheme-apikey.png | Bin 9423 -> 0 bytes 
...io-vendor-extension-backend-rate-limit.png | Bin 40320 -> 0 bytes .../doc/kuadrantctl-ci-cd/index.html | 1613 ---------- .../doc/openapi-apicurio/index.html | 1483 ---------- .../openapi-kuadrant-extensions/index.html | 1430 --------- .../openapi-openshift-dev-spaces/index.html | 1823 ------------ dev/kuadrantctl/index.html | 1798 ----------- .../doc/custom-image/index.html | 1387 --------- .../doc/development/index.html | 1725 ----------- dev/limitador-operator/doc/logging/index.html | 1343 --------- .../doc/rate-limit-headers/index.html | 1345 --------- .../doc/resource-requirements/index.html | 1434 --------- dev/limitador-operator/doc/storage/index.html | 1615 ---------- dev/limitador-operator/doc/tracing/index.html | 1353 --------- dev/limitador-operator/index.html | 1444 --------- dev/limitador/LICENSE | 201 -- dev/limitador/doc/how-it-works/index.html | 1419 --------- .../doc/migrations/conditions/index.html | 1348 --------- .../doc/server/configuration/index.html | 1883 ------------ dev/limitador/doc/topologies/index.html | 1427 --------- dev/limitador/index.html | 1481 ---------- .../docs/http_server_spec.json | 305 -- dev/limitador/limitador-server/index.html | 1412 --------- .../kubernetes/centos-pod.yaml | 15 - .../limitador-server/kubernetes/index.html | 1666 ----------- .../kubernetes/kuard-deployment.yaml | 42 - .../kuard-envoy-config-configmap.yaml | 84 - ...y-sidecar-metrics-dashboard-screenshot.png | Bin 80203 -> 0 bytes .../kubernetes/kuard-podmonitor.yaml | 15 - .../kubernetes/kuard-service.yaml | 16 - .../limitador-config-configmap.yaml | 16 - .../kubernetes/limitador-deployment.yaml | 62 - .../limitador-grafanadashboard.json | 2154 -------------- ...limitador-metrics-dashboard-screenshot.png | Bin 70285 -> 0 bytes .../kubernetes/limitador-podmonitor.yaml | 15 - .../kubernetes/limitador-service.yaml | 20 - .../limitador-server/kubernetes/ratelimit.svg | 1 - .../kubernetes/redis-service.yaml | 13 - .../kubernetes/redis-statefulset.yaml | 55 - 
.../limitador-server/sandbox/Makefile | 126 - .../sandbox/docker-compose-envoy-3-node.yaml | 74 - .../sandbox/docker-compose-envoy.yaml | 28 - .../docker-compose-limitador-disk.yaml | 31 - ...-compose-limitador-distributed-3-node.yaml | 58 - .../docker-compose-limitador-distributed.yaml | 24 - .../docker-compose-limitador-memory.yaml | 31 - ...docker-compose-limitador-redis-cached.yaml | 43 - .../docker-compose-limitador-redis-otel.yaml | 47 - .../docker-compose-limitador-redis-tls.yaml | 45 - .../docker-compose-limitador-redis.yaml | 35 - .../limitador-server/sandbox/envoy.yaml | 84 - .../limitador-server/sandbox/envoy2.yaml | 84 - .../limitador-server/sandbox/envoy3.yaml | 84 - .../limitador-server/sandbox/index.html | 1611 ---------- .../limitador-server/sandbox/limits.yaml | 22 - .../limitador-server/sandbox/load-test.json | 18 - .../sandbox/loadtest/Cargo.lock | 1874 ------------ .../sandbox/loadtest/Cargo.toml | 10 - .../sandbox/loadtest/src/main.rs | 26 - .../sandbox/redis-otel/index.html | 1407 --------- .../sandbox/redis-tls/index.html | 1351 --------- .../sandbox/redis-tls/redis-config.conf | 7 - dev/limitador/limitador/index.html | 1355 --------- dev/search/search_index.json | 2 +- dev/sitemap.xml | 188 -- dev/sitemap.xml.gz | Bin 1406 -> 986 bytes 139 files changed, 2 insertions(+), 86268 deletions(-) delete mode 100644 dev/architecture/docs/design/architectural-overview/index.html delete mode 100644 dev/architecture/docs/design/modular_installation/index.html delete mode 100644 dev/authorino-operator/index.html delete mode 100644 dev/authorino/docs/code_of_conduct/index.html delete mode 100644 dev/authorino/index.html delete mode 100644 dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnshealthcheckprobe.yaml delete mode 100644 dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnsrecord.yaml delete mode 100644 dev/dns-operator/config/samples/kustomization.yaml delete mode 100644 dev/dns-operator/docs/RELEASE/index.html delete mode 100644 
dev/dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/index.html delete mode 100644 dev/dns-operator/docs/reference/dnsrecord/index.html delete mode 100644 dev/dns-operator/index.html delete mode 100644 dev/kuadrant-operator/config/samples/kuadrant_v1_authpolicy.yaml delete mode 100644 dev/kuadrant-operator/config/samples/kuadrant_v1_dnspolicy.yaml delete mode 100644 dev/kuadrant-operator/config/samples/kuadrant_v1_ratelimitpolicy.yaml delete mode 100644 dev/kuadrant-operator/config/samples/kuadrant_v1_tlspolicy.yaml delete mode 100644 dev/kuadrant-operator/config/samples/kuadrant_v1beta1_kuadrant.yaml delete mode 100644 dev/kuadrant-operator/config/samples/kustomization.yaml delete mode 100644 dev/kuadrant-operator/doc/images/kuadrant-architecture.svg delete mode 100644 dev/kuadrant-operator/doc/install/install-make/index.html delete mode 100644 dev/kuadrant-operator/doc/overviews/development/index.html delete mode 100644 dev/kuadrant-operator/doc/overviews/logging/index.html delete mode 100644 dev/kuadrant-operator/doc/proposals/images/authpolicy-control-structure.png delete mode 100644 dev/kuadrant-operator/doc/proposals/rlp-target-gateway-resource/index.html delete mode 100644 dev/kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/index.html delete mode 100644 dev/kuadrant-operator/doc/user-guides/dns/orphan-dns-records/index.html delete mode 100644 dev/kuadrant-operator/doc/user-guides/misc/external-api/index.html delete mode 100644 dev/kuadrant-operator/examples/alerts/index.html delete mode 100644 dev/kuadrant-operator/examples/alerts/kustomization.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/orphan_records.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/prometheusrules_policies_missing.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/slo-availability.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/slo-latency.yaml delete mode 100644 
dev/kuadrant-operator/examples/alerts/sloth/slo-availability.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/sloth/slo-latency.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/tests/slo-availability-test.yaml delete mode 100644 dev/kuadrant-operator/examples/alerts/tests/slo-latency-test.yaml delete mode 100644 dev/kuadrant-operator/examples/dashboards/app_developer.json delete mode 100644 dev/kuadrant-operator/examples/dashboards/business_user.json delete mode 100644 dev/kuadrant-operator/examples/dashboards/controller-resources-metrics.json delete mode 100644 dev/kuadrant-operator/examples/dashboards/controller-runtime-metrics.json delete mode 100644 dev/kuadrant-operator/examples/dashboards/dns-operator.json delete mode 100644 dev/kuadrant-operator/examples/dashboards/kustomization.yaml delete mode 100644 dev/kuadrant-operator/examples/dashboards/platform_engineer.json delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/application.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/aws-dns-provider-secret.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/dnspolicy-bad-strategy.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/dnspolicy-exclude-address.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/dnspolicy-healthchecks.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/dnspolicy.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/gateway.yaml delete mode 100644 dev/kuadrant-operator/examples/dnspolicy/script.sh delete mode 100644 dev/kuadrant-operator/examples/external-api-istio.yaml delete mode 100644 dev/kuadrant-operator/examples/metal-lb.yaml delete mode 100644 dev/kuadrant-operator/examples/oas-apikey.yaml delete mode 100644 dev/kuadrant-operator/examples/oas-oidc.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/admin-key-secret.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/alice-api-key-secret.yaml 
delete mode 100644 dev/kuadrant-operator/examples/toystore/authpolicy.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/authpolicy_jwt-k8s-authnz.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/bob-api-key-secret.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/httproute.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/kuadrant.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/ratelimitpolicy_gateway.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/ratelimitpolicy_httproute.yaml delete mode 100644 dev/kuadrant-operator/examples/toystore/toystore.yaml delete mode 100644 dev/kuadrant-operator/index.html delete mode 100644 dev/kuadrantctl/doc/RELEASE/index.html delete mode 100644 dev/kuadrantctl/doc/development/index.html delete mode 100644 dev/kuadrantctl/doc/generate-gateway-api-httproute/index.html delete mode 100644 dev/kuadrantctl/doc/generate-kuadrant-auth-policy/index.html delete mode 100644 dev/kuadrantctl/doc/generate-kuadrant-rate-limit-policy/index.html delete mode 100644 dev/kuadrantctl/doc/images/apicurio-security-scheme-apikey.png delete mode 100644 dev/kuadrantctl/doc/images/apicurio-vendor-extension-backend-rate-limit.png delete mode 100644 dev/kuadrantctl/doc/kuadrantctl-ci-cd/index.html delete mode 100644 dev/kuadrantctl/doc/openapi-apicurio/index.html delete mode 100644 dev/kuadrantctl/doc/openapi-kuadrant-extensions/index.html delete mode 100644 dev/kuadrantctl/doc/openapi-openshift-dev-spaces/index.html delete mode 100644 dev/kuadrantctl/index.html delete mode 100644 dev/limitador-operator/doc/custom-image/index.html delete mode 100644 dev/limitador-operator/doc/development/index.html delete mode 100644 dev/limitador-operator/doc/logging/index.html delete mode 100644 dev/limitador-operator/doc/rate-limit-headers/index.html delete mode 100644 dev/limitador-operator/doc/resource-requirements/index.html delete mode 100644 
dev/limitador-operator/doc/storage/index.html delete mode 100644 dev/limitador-operator/doc/tracing/index.html delete mode 100644 dev/limitador-operator/index.html delete mode 100644 dev/limitador/LICENSE delete mode 100644 dev/limitador/doc/how-it-works/index.html delete mode 100644 dev/limitador/doc/migrations/conditions/index.html delete mode 100644 dev/limitador/doc/server/configuration/index.html delete mode 100644 dev/limitador/doc/topologies/index.html delete mode 100644 dev/limitador/index.html delete mode 100644 dev/limitador/limitador-server/docs/http_server_spec.json delete mode 100644 dev/limitador/limitador-server/index.html delete mode 100644 dev/limitador/limitador-server/kubernetes/centos-pod.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/index.html delete mode 100644 dev/limitador/limitador-server/kubernetes/kuard-deployment.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/kuard-envoy-config-configmap.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/kuard-envoyproxy-sidecar-metrics-dashboard-screenshot.png delete mode 100644 dev/limitador/limitador-server/kubernetes/kuard-podmonitor.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/kuard-service.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/limitador-config-configmap.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/limitador-deployment.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/limitador-grafanadashboard.json delete mode 100644 dev/limitador/limitador-server/kubernetes/limitador-metrics-dashboard-screenshot.png delete mode 100644 dev/limitador/limitador-server/kubernetes/limitador-podmonitor.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/limitador-service.yaml delete mode 100644 dev/limitador/limitador-server/kubernetes/ratelimit.svg delete mode 100644 dev/limitador/limitador-server/kubernetes/redis-service.yaml delete mode 100644 
dev/limitador/limitador-server/kubernetes/redis-statefulset.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/Makefile delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-envoy-3-node.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-envoy.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-disk.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed-3-node.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-memory.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-cached.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-otel.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-tls.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/envoy.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/envoy2.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/envoy3.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/index.html delete mode 100644 dev/limitador/limitador-server/sandbox/limits.yaml delete mode 100644 dev/limitador/limitador-server/sandbox/load-test.json delete mode 100644 dev/limitador/limitador-server/sandbox/loadtest/Cargo.lock delete mode 100644 dev/limitador/limitador-server/sandbox/loadtest/Cargo.toml delete mode 100644 dev/limitador/limitador-server/sandbox/loadtest/src/main.rs delete mode 100644 dev/limitador/limitador-server/sandbox/redis-otel/index.html delete mode 100644 dev/limitador/limitador-server/sandbox/redis-tls/index.html delete mode 100644 dev/limitador/limitador-server/sandbox/redis-tls/redis-config.conf 
delete mode 100644 dev/limitador/limitador/index.html diff --git a/dev/architecture/docs/design/architectural-overview/index.html b/dev/architecture/docs/design/architectural-overview/index.html deleted file mode 100644 index c095cdb2..00000000 --- a/dev/architecture/docs/design/architectural-overview/index.html +++ /dev/null @@ -1,1796 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Kuadrant Architectural Overview [Draft] - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Kuadrant Architectural Overview [Draft]

- -

Overview

-

It is important to note that Kuadrant is not in itself a gateway provider. Kuadrant provides a set of valuable policy APIs that enhance Gateway API via its defined policy attachment extension point. The policy APIs are reconciled by a set of policy controllers and enforced via integration at different points to configure, enhance and secure the application connectivity provided via Gateway API and the underlying gateway provider. -These policy extensions are focused around areas such as DNS management supporting global load balancing and health checks, alongside service protection specific APIs such as rate limiting and auth. Kuadrant also integrates with Open Cluster Management as a multi-cluster control plane to enable defining and distributing Gateways across multiple clusters, providing load balancing and tls management for these distributed gateways. These integrations and features can be managed centrally in a declarative way from the Open Cluster Management Hub using Kubernetes resources.

-

Key Architectural Areas

-
    -
  • The Kuadrant architecture is spread across a control plane and also a data plane. Kuadrant can work in both a single and multi-cluster context. -Currently in order for all APIs to work in a single or multi-cluster context you need to have Open Cluster Management installed. While this may change in the future, this approach allows us to start with a single cluster and seamlessly scale as more clusters are added.
  • -
  • The control plane is where policies are exposed and expressed as kubernetes APIs and reconciled by the Kuadrant policy controllers.
  • -
  • The data plane is where Kuadrant's service protection components, configured by the control plane policies, are enforced within the gateway instance as part of the request flow.
  • -
-

1000m Architecture

-

-

Control Plane Components and Responsibilities

-

A control plane component is something responsible for accepting instruction via a CRD based API and ensuring that configuration is manifested into state that can be acted on.

-

Kuadrant Operator

-
    -
  • Installation of data plane service protection components via their respective operators
  • -
  • Exposes RateLimitPolicy and AuthPolicy and is currently the policy controller for these APIs
  • -
  • Configures the Gateway to be able to leverage the data plane service protection components
  • -
-

Multi-Cluster Gateway Controller

-
    -
  • Exposes DNSPolicy and TLSPolicy
  • -
  • Configures DNS providers (e.g AWS Route 53) and TLS providers
  • -
  • Focused around use cases involving distributed gateways (for example across clouds or geographic regions)
  • -
  • Integrates with Open Cluster Management as the multi-cluster management hub to distribute and observe gateway status based on the clusters they are deployed to. Works directly with Open Cluster Management APIs such PlacementDecision and ManifestWork.
  • -
-

Kuadrant-add-on-manager

-
    -
  • Sub component in the gateway controller repository
  • -
  • Follows the add-on pattern from Open Cluster Management
  • -
  • Responsible for configuring and installing Kuadrant into a target spoke cluster
  • -
-

Limitador Operator:

-
    -
  • Installs and configures Limitador
  • -
-

Authorino Operator:

-
    -
  • Installs and configures Authorino
  • -
-

Data Plane Components and Responsibilities

-

A data plane component sits in the request flow and is responsible for enforcing policy and providing service protection capabilities based on configuration managed and created by the control plane.

-

Limitador

-
    -
  • Complies with the with Envoy rate limiting API to provide rate limiting to the gateway
  • -
-

Authorino

-
    -
  • Complies with the Envoy external auth API to provide auth integration to the gateway
  • -
-

WASM Shim

-
    -
  • Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador for request time enforcement of and rate limiting
  • -
-

Dependencies and integrations

-

In order to provide its full suite of functionality, Kuadrant has several dependencies. Some of these are optional depending on the functionality needed.

-

Cert-Manager: Required

- -

Open Cluster Manager: Required

-
    -
  • Provides a multi-cluster control plane to enable the defining and distributing of Gateways across multiple clusters.
  • -
-

Istio: Required

-
    -
  • Gateway API provider that Kuadrant integrates with via WASM and Istio APIS to provide service protection capabilities.
  • -
  • Used by RateLimitPolicy and AuthPolicy
  • -
-

Gateway API: Required

-
    -
  • New standard for Ingress from the Kubernetes community
  • -
  • Gateway API is the core API that Kuadrant integrates with.
  • -
-

Thanos/Prometheus/Grafana: Optional

-
    -
  • Provides observability integration
  • -
  • Rather than providing any Kuadrant specific observability tooling, we instead look to leverage existing tools and technologies to provide observability capabilities for ingress.
  • -
-

High Level Multi-Cluster Architecture

-

-

Kuadrant has a multi-cluster gateway controller that is intended to run in a Open Cluster Management provided "Hub" cluster. This cluster is effectively a central management cluster where policy and gateways along with all that Open Cluster Management offers can be defined and distributed to the managed "spoke" clusters.

-

Single cluster

-

In a single cluster context, the overall architecture remains the same as above, the key difference is that the Hub and Spoke cluster are now a single cluster rather than multiple clusters. This is how we are initially supporting single cluster.

-

How does Kuadrant leverage Open Cluster Management?

-

Kuadrant deploys a multi-cluster gateway controller into the Open Cluster Management hub (a control plane that manages a set of "spoke" clusters where workloads are executed). This controller offers its own APIs but also integrates with hub CRD based APIs (such as the placement API) along with the Gateway API CRD based APIs in order to provide multi-cluster Gateway capabilities to the hub and distribute actual gateway instances to the spokes. See the Open Cluster Management docs for further details on the hub spoke architecture.

-

As part of installing Kuadrant, the Gateway API CRDs are also installed into the hub cluster and Kuadrant defines a standard Gateway API GatewayClass resource that the multi-cluster gateway controller is the chosen controller for.

-

Once installed, an Open Cluster Management user can then (with the correct RBAC in place) define in the standard way a Gateway resource that inherits from the Kuadrant configured GatewayClass in the hub. There is nothing unique about this Gateway definition, the difference is what it represents and how it is used. This Gateway is used to represent a "multi-cluster" distributed gateway. As such there are no pods running behind this Gateway instance in the hub cluster, instead it serves as a template that the Kuadrant multi-cluster gateway controller reconciles and distributes to targeted spoke clusters. It leverages the Open Cluster Management APIs to distribute these gateways (more info below) and aggregates the status information from each spoke cluster instance of this gateway back to this central definition, in doing this it can represent the status of the gateway across multiple clusters but also use that information to integrate with DNS providers etc.

-

-

Gateway Deployment and Distribution

-

In order for a multi-cluster gateway to be truly useful, it needs to be distributed or "placed" on a specific set of hub managed spoke clusters. Open Cluster Management is responsible for a set of placement and replication APIs. Kuadrant is aware of these APIs, and so when a given gateway is chosen to be placed on a set of managed clusters, Kuadrant multi-cluster gateway controller will ensure the right resources (ManifestWork) are created in the correct namespaces in the hub. Open Cluster Management then is responsible for syncing these to the actual spoke cluster and reporting back the status of these resources to the Hub. A user would indicate which clusters they want a gateway placed on by using a Placement and then labeling the gateway using the cluster.open-cluster-management.io/placement label.

-

In order for the Gateway to be instantiated, we need to know what underlying gateway provider is being used on the spoke clusters. Admins can then set this provider in the hub via the GatewayClass params. In the hub, Kuadrant will then apply a transformation to the gateway to ensure when synced it references this spoke gateway provider (Istio for example).

-

It is the Open Cluster Management workagent that is responsible for syncing down and applying the resources into the managed spoke cluster. It is also responsible for syncing status information back to the hub. It is the multi-cluster gateway controller that is responsible for aggregating this status.

-

The status information reported back to the Hub is used by the multi-cluster gateway controller to know what LB hosts / IPAddresses to use for DNSRecords that it creates and manages.

-

-

More info on the Open Cluster Management hub and spoke architecture can be found here

-

How does Kuadrant integrate with Gateway Providers?

-

Currently the Kuadrant data plane only integrates with an Istio based gateway provider:

-
    -
  • It registers Authorino with the IstioOperator as an auth provider so that Authorino can be used as an external auth provider.
  • -
  • It leverages an EnvoyFilter to register the rate limiting service as an upstream service.
  • -
  • Based on the Kuadrant AuthPolicy, it leverages Istio's AuthorizationPolicy resource to configure when a request should trigger Authorino to be called for a given host, path and method etc.
  • -
  • It provides a WebAssembly (WASM) Plugin that conforms to the Proxy WASM ABI (application binary interface). This WASM Plugin is loaded into the underlying Envoy based gateway provider and configured via the Kuadrant Operator based on defined RateLimitPolicy resources. This binary is executed in response to a HTTP request being accepted by the gateway via the underlying Envoy instance that provides the proxy layer for the Gateway (IE Envoy). This plugin is configured with the correct upstream rate limit service name and when it sees a request, based on the provided configuration, it will trigger a call to the installed Limitador that is providing the rate limit capabilities and either allow the request to continue or trigger a response to the client with a 429 (too many requests) HTTP code.
  • -
-

Data Flows

-

There are several different data flows when using Kuadrant.

-

Control plane configuration and status reporting

-

The initial creation of these APIs (gateways, policies etc) is done by the relevant persona in the control plane just as they would any other k8s resource. We use the term cluster admin or gateway admin as the operations type persona configuring, and placing gateways. -As shown above, in a multi-cluster configuration. API definitions are pulled from the Hub and "manifested" into the spokes. The Status of those synced resources are reported back to the Hub. The same happens for a single cluster, the only difference being the work agent hub controllers are all installed on one cluster.

-

Third party enforcement and Integration

-

In order to enforce the policy configuration, components in the control plane and data plane can reach out to configured 3rd parties such as cloud based DNS provider, TLS providers and Auth providers.

-

Request Flow

-

Requests coming through the gateway instance can be sent to Limitador based on configuration of the WASM plugin installed into the Envoy based gateway provider or to Authorino based configuration provided by the Istio AuthorizationPolicy. -Each of these components have the capability to see the request and need to in order to make the required decision. Each of these components can also prevent the request from reaching its intended backend destination based on user configuration.

-

-

Auth

-

As all of the APIs are CRDs, auth around creating these resources is handled in the standard way IE by the kubernetes cluster and RBAC. There is no relationship by default between the Auth features provided by Authorino to application developers and the auth requirements of the cluster API server.

-

For Auth between Spoke and Hub see Open Cluster Management docs

-

Observability

-

Kuadrant doesn't provide any specific observability components, but rather provides a reference setup using well known and established components along with some useful dashboards to help observe key things around the Gateways. The focus of this setup, is in the context of a multi-cluster setup where Open Cluster Management is installed and gateways are being defined and distributed from that hub.

-

-

Some notes on future direction

-

This section is here to provide some insight into architectural changes that may be seen in the near future:

-

What is in this doc represents the architecture at point our MVP release. Below are some areas that we have identified that are likely to change in the coming releases. As these happen, this doc will also evolve.

-
    -
  • We want to separate out the ocm integration into its own controller so that policies can evolve without a coupling to any one multi-cluster management solution
  • -
  • We want to separate the policies into their own controller that is capable of supporting both single (without Open Cluster Management) and multi-cluster (with Open Cluster Management enabled) contexts, so that the barrier to entry is reduced for those starting with a single cluster
  • -
  • We want to allow for an on cluster DNS Provider such as CoreDNS so that we can provide an implementation that is disconnected from any cloud provider and provides more flexible DNS setups.
  • -
  • We will look to reduce our integration with Istio and want to provide integration with additional gateway providers such as EnvoyGateway
  • -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/architecture/docs/design/modular_installation/index.html b/dev/architecture/docs/design/modular_installation/index.html deleted file mode 100644 index d15cfbe3..00000000 --- a/dev/architecture/docs/design/modular_installation/index.html +++ /dev/null @@ -1,1426 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Kuadrant Proposal - Modular Installation - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Kuadrant Proposal - Modular Installation

-

Kuadrant is developing a set of loosely coupled functionalities built directly on top of Kubernetes. -Kuadrant aims to allow customers to just install, use and understand those functionalities they need.

-

Problem Statement

-

Currently, the installation tool of kuadrant, the kuadrantctl CLI, -installs all or nothing. Installing more than the customer needs adds unneeded complexity and operational effort. -For example, if a customer is looking for rate limiting and not interested in authentication functionality, -then the customer should be able to just install and run that part of Kuadrant.

-

High Level Goals

-
    -
  • Install only required components. Operate only required components.
  • -
-

Reduce system complexity and operational effort to the minimum required. -Components in this context make reference to deployments and running instances.

-
    -
  • Expose only the activated functionalities
  • -
-

A user of a partial Kuadrant install should not be confronted with data in custom resources that -has no meaning or is not accessible in their partial Kuadrant install. The design of the kuadrant -API should take this goal into account.

-

Proposed Solution

-

The kuadrant installation mechanism should offer modular installation to enable/disable loosely coupled pieces of kuadrant. -Modular installation options should be feature oriented rather than deployment component oriented. -Then, it is up to the installation tool to decide what components need to be deployed and how to -configure them.

-

Each feature, or part of it, is eligible to be included or excluded when installing kuadrant.

-

Some profiles can be defined to group set of commonly required features. Naming the profiles -allows the customer to easily express wanted installation configuration. Furthermore, profiles -not only can be used to group a set of features, profiles can be used to define deployment options.

- - - - - - - - - - - - - - - - - - - - - - - - - -
NameDescription
MinimalMinimal installation required to run an API without any protection, analytics or API management. Default deployment option
AuthZAuthentication and authorization mechanisms activated
RateLimitBasic rate limit (only pre-auth rate limit) features
FullFull featured kuadrant installation
-

A kuadrant operator, together with a design of a kuadrant CRD is desired. -Not only for kuadrant installation, but also for lifecycle management. -Additionally, the kuadrantctl CLI tool can also -be useful to either deploy kuadrant components and manifests or just deploy the kuadrant operator.

-

The kuadrant control plane should be aware of the installed profile via env vars or command line params -in the control plane running components. With that information, the control plane can decide to -enable or disable CRD watching, label and annotation monitoring and ultimately reject any configuration -object that relies on disabled functionality. The least a customer can expect from kuadrant is to be -consistent and reject any functionality request that it cannot provide.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/authorino-operator/index.html b/dev/authorino-operator/index.html deleted file mode 100644 index 9afd291f..00000000 --- a/dev/authorino-operator/index.html +++ /dev/null @@ -1,2133 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Authorino Operator - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Authorino Operator

-

A Kubernetes Operator to manage Authorino instances.

-

License -codecov -FOSSA Status

-

Installation

-

The Operator can be installed by applying the manifests to the Kubernetes cluster or using Operator Lifecycle Manager (OLM)

-

Applying the manifests to the cluster

-
    -
  1. Install the Operator manifests
  2. -
-
make install
-
-
    -
  1. Deploy the Operator
  2. -
-
make deploy
-
-
- Tip: Deploy a custom image of the Operator -
- To deploy an image of the Operator other than the default quay.io/kuadrant/authorino-operator:latest, specify by setting the OPERATOR_IMAGE parameter. E.g.: - -
make deploy OPERATOR_IMAGE=authorino-operator:local
-
-
- -

Installing via OLM

-

To install the Operator using the Operator Lifecycle Manager, you need to make the -Operator CSVs available in the cluster by creating a CatalogSource resource.

-

The bundle and catalog images of the Operator are available in Quay.io:

- - - - - - - - - - - -
Bundlequay.io/kuadrant/authorino-operator-bundle
Catalogquay.io/kuadrant/authorino-operator-catalog
- -
    -
  1. Create the namespace for the Operator
  2. -
-
kubectl create namespace authorino-operator
-
-
    -
  1. Create the CatalogSource resource pointing to - one of the images from in the Operator's catalog repo:
  2. -
-
kubectl -n authorino-operator apply -f -<<EOF
-apiVersion: operators.coreos.com/v1alpha1
-kind: CatalogSource
-metadata:
-  name: operatorhubio-catalog
-  namespace: authorino-operator
-spec:
-  sourceType: grpc
-  image: quay.io/kuadrant/authorino-operator-catalog:latest
-  displayName: Authorino Operator
-EOF
-
-

Deploy authorino operator using operator-sdk

-
    -
  1. Install operator-sdk bin -
    make operator-sdk
    -
  2. -
  3. Run operator-sdk bundle command -
    ./bin/operator-sdk run bundle quay.io/kuadrant/authorino-operator-bundle:latest
    -
    -Note: For s390x & ppc64le , use operator-sdk to install authorino-operator
  4. -
-

Requesting an Authorino instance

-

Once the Operator is up and running, you can request instances of Authorino by creating Authorino CRs. E.g.:

-
kubectl -n default apply -f -<<EOF
-apiVersion: operator.authorino.kuadrant.io/v1beta1
-kind: Authorino
-metadata:
-  name: authorino
-spec:
-  listener:
-    tls:
-      enabled: false
-  oidcServer:
-    tls:
-      enabled: false
-EOF
-
-

The Authorino Custom Resource Definition (CRD)

-

API to install, manage and configure Authorino authorization services .

-

Each Authorino -Custom Resource (CR) represents an instance of Authorino deployed to the cluster. The Authorino Operator will reconcile -the state of the Kubernetes Deployment and associated resources, based on the state of the CR.

-

API Specification

- - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
specAuthorinoSpecSpecification of the Authorino deployment.Required
-

AuthorinoSpec

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
clusterWideBooleanSets the Authorino instance's watching scope – cluster-wide or namespaced.Default: true (cluster-wide)
authConfigLabelSelectorsStringLabel selectors used by the Authorino instance to filter AuthConfig-related reconciliation events.Default: empty (all AuthConfigs are watched)
secretLabelSelectorsStringLabel selectors used by the Authorino instance to filter Secret-related reconciliation events (API key and mTLS authentication methods).Default: authorino.kuadrant.io/managed-by=authorino
supersedingHostSubsetsBooleanEnable/disable allowing AuthConfigs to supersede strict subsets of hosts already taken.Default: false
replicasIntegerNumber of replicas desired for the Authorino instance. Values greater than 1 enable leader election in the Authorino service, where the leader updates the statuses of the AuthConfig CRs).Default: 1
evaluatorCacheSizeIntegerCache size (in megabytes) of each Authorino evaluator (when enabled in an AuthConfig).Default: 1
imageStringAuthorino image to be deployed (for dev/testing purpose only).Default: quay.io/kuadrant/authorino:latest
imagePullPolicyStringSets the imagePullPolicy of the Authorino Deployment (for dev/testing purpose only).Default: k8s default
logLevelStringDefines the level of log you want to enable in Authorino (debug, info and error).Default: info
logModeStringDefines the log mode in Authorino (development or production).Default: production
listenerListenerSpecification of the authorization service (gRPC interface).Required
oidcServerOIDCServerSpecification of the OIDC service.Required
tracingTracingConfiguration of the OpenTelemetry tracing exporter.Optional
metricsMetricsConfiguration of the metrics server (port, level).Optional
healthzHealthzConfiguration of the health/readiness probe (port).Optional
volumesVolumesSpecAdditional volumes to be mounted in the Authorino pods.Optional
-

Listener

-

Configuration of the authorization server – gRPC -and raw HTTP -interfaces

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
portIntegerPort number of authorization server (gRPC interface).DEPRECATED
Use ports instead
portsPortsPort numbers of the authorization server (gRPC and raw HTTPinterfaces).Optional
tlsTLSTLS configuration of the authorization server (GRPC and HTTP interfaces).Required
timeoutIntegerTimeout of external authorization request (in milliseconds), controlled internally by the authorization server.Default: 0 (disabled)
-

OIDCServer

-

Configuration of the OIDC Discovery server for Festival Wristband -tokens.

- - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
portIntegerPort number of OIDC Discovery server for Festival Wristband tokens.Default: 8083
tlsTLSTLS configuration of the OIDC Discovery server for Festival Wristband tokensRequired
-

TLS

-

TLS configuration of server. Appears in listener and oidcServer.

- - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
enabledBooleanWhether TLS is enabled or disabled for the server.Default: true
certSecretRefLocalObjectReferenceThe reference to the secret that contains the TLS certificates tls.crt and tls.key.Required when enabled: true
-

Ports

-

Port numbers of the authorization server.

- - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
grpcIntegerPort number of the gRPC interface of the authorization server. Set to 0 to disable this interface.Default: 50001
httpIntegerPort number of the raw HTTP interface of the authorization server. Set to 0 to disable this interface.Default: 5001
-

Tracing

-

Configuration of the OpenTelemetry tracing exporter.

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
endpointStringFull endpoint of the OpenTelemetry tracing collector service (e.g. http://jaeger:14268/api/traces).Required
tagsMapKey-value map of fixed tags to add to all OpenTelemetry traces emitted by Authorino.Optional
insecureBooleanEnable/disable insecure connection to the tracing endpointDefault: false
-

Metrics

-

Configuration of the metrics server.

- - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
portIntegerPort number of the metrics server.Default: 8080
deepBooleanEnable/disable metrics at the level of each evaluator config (if requested in the AuthConfig) exported by the metrics server.Default: false
-

Healthz

-

Configuration of the health/readiness probe (port).

- - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
portIntegerPort number of the health/readiness probe.Default: 8081
-

VolumesSpec

-

Additional volumes to project in the Authorino pods. Useful for validation of TLS self-signed certificates of external -services known to have to be contacted by Authorino at runtime.

- - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
items[]VolumeSpecList of additional volume items to project.Optional
defaultModeIntegerMode bits used to set permissions on the files. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.Optional
-

VolumeSpec

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescriptionRequired/Default
nameStringName of the volume and volume mount within the Deployment. It must be unique in the CR.Optional
mountPathStringAbsolute path where to mount all the items.Required
configMaps[]StringList of Kubernetes ConfigMap names to mount.Required exactly one of: configMaps, secrets.
secrets[]StringList of Kubernetes Secret names to mount.Required exactly one of: configMaps, secrets.
items[]KeyToPathMount details for selecting specific ConfigMap or Secret entries.Optional
-

Full example

-
apiVersion: operator.authorino.kuadrant.io/v1beta1
-kind: Authorino
-metadata:
-  name: authorino
-spec:
-  clusterWide: true
-  authConfigLabelSelectors: environment=production
-  secretLabelSelectors: authorino.kuadrant.io/component=authorino,environment=production
-
-  replicas: 2
-
-  evaluatorCacheSize: 2 # mb
-
-  image: quay.io/kuadrant/authorino:latest
-  imagePullPolicy: Always
-
-  logLevel: debug
-  logMode: production
-
-  listener:
-    ports:
-      grpc: 50001
-      http: 5001
-    tls:
-      enabled: true
-      certSecretRef:
-        name: authorino-server-cert # secret must contain `tls.crt` and `tls.key` entries
-
-  oidcServer:
-    port: 8083
-    tls:
-      enabled: true
-      certSecretRef:
-        name: authorino-oidc-server-cert # secret must contain `tls.crt` and `tls.key` entries
-
-  metrics:
-    port: 8080
-    deep: true
-
-  volumes:
-    items:
-
-      - name: keycloak-tls-cert
-        mountPath: /etc/ssl/certs
-        configMaps:
-          - keycloak-tls-cert
-        items: # details to mount the k8s configmap in the authorino pods
-          - key: keycloak.crt
-            path: keycloak.crt
-    defaultMode: 420
-
-

Removal

-

Removing the operator installed via manifests

-
    -
  1. Undeploy the Operator
  2. -
-
make undeploy
-
-
    -
  1. Remove the Operator manifests
  2. -
-
make uninstall
-
-

Remove dependencies (Optional)

-
    -
  1. -

    Remove operator namespace -

    make delete-namespace
    -

    -
  2. -
  3. -

    Uninstall cert manager -

    make uninstall-cert-manager
    -

    -
  4. -
-

License

-

FOSSA Status

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/authorino/docs/code_of_conduct/index.html b/dev/authorino/docs/code_of_conduct/index.html deleted file mode 100644 index 5ab05f5b..00000000 --- a/dev/authorino/docs/code_of_conduct/index.html +++ /dev/null @@ -1,1347 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Code of conduct - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
- -
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/authorino/index.html b/dev/authorino/index.html deleted file mode 100644 index 90235675..00000000 --- a/dev/authorino/index.html +++ /dev/null @@ -1,1819 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Authorino - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Authorino

-

Kubernetes-native authorization service for tailor-made Zero Trust API security.

-

A lightweight Envoy external authorization server fully manageable via Kubernetes Custom Resources.
-JWT authentication, API key, mTLS, pattern-matching authz, OPA, K8s SA tokens, K8s RBAC, external metadata fetching, and more, with minimum to no coding at all, no rebuilding of your applications.

-

Authorino is not about inventing anything new. It's about making the best things about auth out there easy and simple to use. Authorino is multi-tenant, it's cloud-native and it's open source.

-

License -Unit Tests -End-to-end Tests -Smoke Tests -FOSSA Status

-

Getting started

-
    -
  1. Deploy with the Authorino Operator
  2. -
  3. Setup Envoy proxy and the external authorization filter
  4. -
  5. Apply an Authorino AuthConfig custom resource
  6. -
  7. Obtain an authentication token and start sending requests
  8. -
-

The full Getting started page of the docs provides details for the steps above, as well as information about requirements and next steps.

-

Or try out our Hello World example.

-

For general information about protecting your service using Authorino, check out the docs.

-

Use-cases

-

The User guides section of the docs gathers several AuthN/AuthZ use-cases as well as the instructions to implement them using Authorino. A few examples are:

- -

How it works

-

Authorino enables hybrid API security, with usually no code changes required to your application, tailor-made for your own combination of authentication standards and protocols and authorization policies of choice.

-

Authorino implements Envoy Proxy's external authorization gRPC protocol, and is a part of Red Hat Kuadrant architecture.

-

Under the hood, Authorino is based on Kubernetes Custom Resource Definitions and the Operator pattern.

-

Bootstrap and configuration:

-
    -
  1. Deploy the service/API to be protected ("Upstream"), Authorino and Envoy
  2. -
  3. Write and apply an Authorino AuthConfig Custom Resource associated to the public host of the service
  4. -
-

Request-time:

-

- - How it works -

-
    -
  1. A user or service account ("Consumer") obtains an access token to consume resources of the Upstream service, and sends a request to the Envoy ingress endpoint
  2. -
  3. The Envoy proxy establishes fast gRPC connection with Authorino carrying data of the HTTP request (context info), which causes Authorino to lookup for an AuthConfig Custom Resource to enforce (pre-cached)
  4. -
  5. Identity verification (authentication) phase - Authorino verifies the identity of the consumer, where at least one authentication method/identity provider must go through
  6. -
  7. External metadata phase - Authorino fetches additional metadata for the authorization from external sources (optional)
  8. -
  9. Policy enforcement (authorization) phase - Authorino takes as input a JSON composed out of context data, resolved identity object and fetched additional metadata from previous phases, and triggers the evaluation of user-defined authorization policies
  10. -
  11. Response (metadata-out) phase – Authorino builds user-defined custom responses (dynamic JSON objects and/or Festival Wristband OIDC tokens), to be supplied back to the client and/or upstream service within added HTTP headers or as Envoy Dynamic Metadata (optional)
  12. -
  13. Callbacks phase – Authorino sends callbacks to specified HTTP endpoints (optional)
  14. -
  15. Authorino and Envoy settle the authorization protocol with either OK/NOK response
  16. -
  17. If authorized, Envoy triggers other HTTP filters in the chain (if any), pre-injecting eventual dynamic metadata returned by Authorino, and ultimately redirects the request to the Upstream
  18. -
  19. The Upstream serves the requested resource to the consumer
  20. -
-
-More -

The Architecture section of the docs covers details of protecting your APIs with Envoy and Authorino, including information about topology (centralized gateway, centralized authorization service or sidecars), deployment modes (cluster-wide reconciliation vs. namespaced instances), an specification of Authorino's AuthConfig Custom Resource Definition (CRD) and more.

-

You will also find in that section information about what happens in request-time (aka Authorino's Auth Pipeline) and how to leverage the Authorization JSON for writing policies, dynamic responses and other features of Authorino.

-
-

List of features

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FeatureStage
Identity verification & authenticationJOSE/JWT validation (OpenID Connect)Ready
OAuth 2.0 Token Introspection (opaque tokens)Ready
Kubernetes TokenReview (SA tokens)Ready
OpenShift User-echo endpointIn analysis
API key authenticationReady
mTLS authenticationReady
HMAC authenticationPlanned (#9)
Plain (resolved beforehand and injected in the payload)Ready
Anonymous accessReady
Ad hoc external metadata fetchingOpenID Connect User InfoReady
UMA-protected resource attributesReady
HTTP GET/GET-by-POSTReady
Policy enforcement/authorizationJSON pattern matching (e.g. JWT claims, request attributes checking)Ready
OPA/Rego policies (inline and pull from registry)Ready
Kubernetes SubjectAccessReview (resource and non-resource attributes)Ready
Authzed/SpiceDBReady
Keycloak Authorization Services (UMA-compliant Authorization API)In analysis
Custom responsesFestival Wristbands tokens (token normalization, Edge Authentication Architecture)Ready
JSON injection (header injection, Envoy Dynamic Metadata)Ready
Plain text value (header injection)Ready
Custom response status code/messages (e.g. redirect)Ready
CallbacksHTTP endpointsReady
CachingOpenID Connect and User-Managed Access configsReady
JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS)Ready
Access tokensReady
External metadataReady
Precompiled Rego policiesReady
Policy evaluationReady
Sharding (lookup performance, multitenancy)Ready
- -

For a detailed description of the features above, refer to the Features page.

-

FAQ

-
-Do I need to deploy Envoy? -

Authorino is built from the ground up to work well with Envoy. It is strongly recommended that you leverage Envoy along side Authorino. That said, it is possible to use Authorino without Envoy.

-

Authorino implements Envoy's external authorization gRPC protocol and therefore will accept any client request that complies.

-

Authorino also provides a second interface for raw HTTP authorization, suitable for using with Kubernetes ValidatingWebhook and other integrations (e.g. other proxies).

-

The only attribute of the authorization request that is strictly required is the host name. (See Host lookup for more information.) The other attributes, such as method, path, headers, etc, might as well be required, depending on each AuthConfig. In the case of the gRPC CheckRequest method, the host is supplied in Attributes.Request.Http.Host and alternatively in Attributes.ContextExtensions["host"]. For raw HTTP authorization requests, the host must be supplied in Host HTTP header.

-

Check out Kuadrant for easy-to-use Envoy and Authorino deployment & configuration for API management use-cases, using Kubernetes Custom Resources.

-
-
-Is Authorino an Identity Provider (IdP)? -

No, Authorino is not an Identity Provider (IdP). Neither it is an auth server of any kind, such as an OAuth2 server, an OpenID Connect (OIDC) server, a Single Sign On (SSO) server.

-

Authorino is not an identity broker either. It can verify access tokens from multiple trusted sources of identity and protocols, but it will not negotiate authentication flows for non-authenticated access requests. Some tricks nonetheless can be done, for example, to redirect unauthenticated users to a login page.

-

For an excellent auth server that checks all the boxes above, check out Keycloak.

-
-
-How does Authorino compare to Keycloak? -

Keycloak is a proper auth server and identity provider (IdP). It offers a huge set of features for managing identities, identity sources with multiple user federation options, and a platform for authentication and authorization services.

-

Keycloak exposes authenticators that implement protocols such as OpenID Connect. This is a one-time flow that establishes the delegation of power to a client, for a short period of time. To be consistent with Zero Trust security, you want a validator to verify the short-lived tokens in every request that tries to reach your protected service/resource. This step, which repeats every time, can avoid heavy lookups into big tables of tokens and leverage cached authorization policies for fast in-memory evaluation. This is where Authorino comes in.

-

Authorino verifies and validates Keycloak-issued ID tokens. OpenID Connect Discovery is used to request and cache JSON Web Key Sets (JWKS), used to verify the signature of the tokens without having to contact again with the Keycloak server, or looking in a table of credentials. Moreover, user long-lived credentials are safe, rather than spread in hops across the network.

-

You can also use Keycloak for storing auth-relevant resource metadata. These can be fetched by Authorino in request-time, to be combined into your authorization policies. See Keycloak Authorization Services and User-Managed Access (UMA) support, as well as Authorino UMA external metadata counter-part.

-
-
-Why doesn't Authorino handle OAuth flows? -

It has to do with trust. OAuth grants are supposed to be negotiated directly between whoever owns the long-lived credentials in one hand (user, service accounts), and the trustworthy auth server that receives those credentials – ideally with minimum number of hops in the middle – and exchanges them for short-lived access tokens, on the other end.

-

There are use-cases for Authorino running in the edge (e.g. Edge Authentication Architecture and token normalization), but in most cases Authorino should be seen as a last-mile component that provides decoupled identity verification and authorization policy enforcement to protected services in request-time. In this sense, the OAuth grant is a pre-flight exchange that happens once and as direct and safe as possible, whereas auth enforcement is kept lightweight and efficient.

-
-
-Where does Authorino store users and roles? -

Authorino does not store users, roles, role bindings, access control lists, or any raw authorization data. Authorino handles policies, where even these policies can be stored elsewhere (as opposed to stated inline inside of an Authorino AuthConfig CR).

-

Authorino evaluates policies for stateless authorization requests. Any additional context is either resolved from the provided payload or static definitions inside the policies. That includes extracting user information from a JWT or client TLS certificate, requesting user metadata from opaque authentication tokens (e.g. API keys) to the trusted sources actually storing that content, obtaining synchronous HTTP metadata from services, etc.

-

In the case of authentication with API keys, as well as its derivative to model HTTP Basic Auth, user data are stored in Kubernetes Secrets. The secret's keys, annotations and labels are usually the structures used to organize the data that later a policy evaluated in Authorino may require. Strictly, those are not Authorino data structures.

-
-
-Can't I just use Envoy JWT Authentication and RBAC filters? -

Envoy's JWT Authentication works pretty much similar to Authorino's JOSE/JWT verification and validation for OpenID Connect. In both cases, the JSON Web Key Sets (JWKS) to verify the JWTs are auto-loaded and cached to be used in request-time. Moreover, you can configure for details such as where to extract the JWT from the HTTP request (header, param or cookie) and do some cool tricks regarding how dynamic metadata based on JWT claims can be injected to consecutive filters in the chain.

-

However, in terms of authorization, while Envoy's implementation essentially allows to check for the list of audiences (aud JWT claim), Authorino opens up for a lot more options such as pattern-matching rules with operators and conditionals, built-in OPA and other methods of evaluating authorization policies.

-

Authorino also allows to combine JWT authentication with other types of authentication to support different sources of identity and groups of users such as API keys, Kubernetes tokens, OAuth opaque tokens , etc.

-

In summary, Envoy's JWT Authentication and Envoy RBAC filter are excellent solutions for simple use-cases where JWTs from a single issuer are the only authentication method you are planning to support and limited to no authorization rules suffice. On the other hand, if you need to integrate more identity sources, different types of authentication, authorization policies, etc, you might want to consider Authorino.

-
-
-Should I use Authorino if I already have Istio configured? -

Istio is a great solution for managing service meshes. It delivers an excellent platform with an interesting layer of abstraction on top of Envoy proxy's virtual omnipresence within the mesh.

-

There are lots of similarities, but also complementarity between Authorino and Istio and Istio Authorization in special.

-

Istio provides a simple way to enable features that are, in many cases, features of Envoy, such as authorization based on JWTs, authorization based on attributes of the request, and activation of external authorization services, without having to deal with complex Envoy config files. See Kuadrant for a similar approach, nonetheless leveraging features of Istio as well.

-

Authorino is an Envoy-compatible external authorization service. One can use Authorino with or without Istio.

-

In particular, Istio Authorization Policies can be seen, in terms of functionality and expressiveness, as a subset of one type of authorization policies supported by Authorino, the pattern-matching authorization policies. While Istio, however, is heavily focused on specific use cases of API Management, offering a relatively limited list of supported attribute conditions, Authorino is more generic, allowing to express authorization rules for a wider spectrum of use cases – ACLs, RBAC, ABAC, etc, pretty much counting on any attribute of the Envoy payload, identity object and external metadata available.

-

Authorino also provides built-in OPA authorization, several other methods of authentication and identity verification (e.g. Kubernetes token validation, API key-based authentication, OAuth token introspection, OIDC-discoverable JWT verification, etc), and features like fetching of external metadata (HTTP services, OIDC userinfo, UMA resource data), token normalization, wristband tokens and dynamic responses. These all can be used independently or combined, in a simple and straightforward Kubernetes-native fashion.

-

In summary, one might value Authorino when looking for a policy enforcer that offers:

-
    -
  1. multiple supported methods and protocols for rather hybrid authentication, encompassing future and legacy auth needs;
  2. -
  3. broader expressiveness and more functionalities for the authorization rules;
  4. -
  5. authentication and authorization in one single declarative manifest;
  6. -
  7. capability to fetch auth metadata from external sources on-the-fly;
  8. -
  9. built-in OPA module;
  10. -
  11. easy token normalization and/or aiming for Edge Authentication Architecture (EAA).
  12. -
-

The good news is that, if you have Istio configured, then you have Envoy and the whole platform for wiring Authorino up if you want to. 😉

-
-
-Do I have to learn OPA/Rego language to use Authorino? -

No, you do not. However, if you are comfortable with Rego from Open Policy Agent (OPA), there are some quite interesting things you can do in Authorino, just as you would in any OPA server or OPA plugin, but leveraging Authorino's built-in OPA module instead. Authorino's OPA module is compiled as part of Authorino's code directly from the Golang packages, and imposes no extra latency to the evaluation of your authorization policies. Even the policies themselves are pre-compiled in reconciliation-time, for fast evaluation afterwards, in request-time.

-

On the other hand, if you do not want to learn Rego or in any case would like to combine it with declarative and Kubernetes-native authN/authZ spec for your services, Authorino does complement OPA with at least two other methods for expressing authorization policies – i.e. pattern-matching authorization and Kubernetes SubjectAccessReview, the latter allowing to rely completely on the Kubernetes RBAC.

-

You break down, mix and combine these methods and technolgies in as many authorization policies as you want, potentially applying them according to specific conditions. Authorino will trigger the evaluation of concurrent policies in parallel, aborting the context if any of the processes denies access.

-

Authorino also packages well-established industry standards and protocols for identity verification (JOSE/JWT validation, OAuth token introspection, Kubernetes TokenReview) and ad-hoc request-time metadata fetching (OIDC userinfo, User-Managed Access (UMA)), and corresponding layers of caching, without which such functionalities would have to be implemented by code.

-
-
-Can I use Authorino to protect non-REST APIs? -

Yes, you can. In principle, the API format (REST, gRPC, GraphQL, etc) should not matter for the authN/authZ enforcer. There are a couple points to consider though.

-

While REST APIs are designed in a way that, in most cases, information usually needed for the evaluation of authorization policies are available in the metadata of the HTTP request (method, path, headers), other API formats quite often will require processing of the HTTP body. By default, Envoy's external authorization HTTP filter will not forward the body of the request to Authorino; to change that, enable the with_request_body option in the Envoy configuration for the external authorization filter. E.g.:

-
with_request_body:
-  max_request_bytes: 1024
-  allow_partial_message: true
-  pack_as_bytes: true
-
-

Additionally, when enabling the request body passed in the payload to Authorino, parsing of the content should be of concern as well. Authorino provides easy access to attributes of the HTTP request, parsed as part of the Authorization JSON, however the body of the request is passed as string and should be parsed by the user according to each case.

-

Check out Authorino OPA authorization and the Rego Encoding functions for options to parse serialized JSON, YAML and URL-encoded params. For XML transformation, an external parsing service connected via Authorino's HTTP GET/GET-by-POST external metadata might be required.

-
-
-Can I run Authorino other than on Kubernetes? -

As of today, no, you cannot, or at least it wouldn't suit production requirements.

-
-
-Do I have to be admin of the cluster to install Authorino? -

To install the Authorino Custom Resource Definition (CRD) and to define cluster roles required by the Authorino service, admin privilege to the Kubernetes cluster is required. This step happens only once per cluster and is usually equivalent to installing the Authorino Operator.

-

Thereafter, deploying instances of the Authorino service and applying AuthConfig custom resources to a namespace depend on the permissions set by the cluster administrator – either directly by editing the bindings in the cluster's RBAC, or via options of the operator. In most cases, developers will be granted permissions to create and manage AuthConfigs, and sometimes to deploy their own instances of Authorino.

-
-
-Is it OK to store AuthN/AuthZ configs as Kubernetes objects? -

Authorino's API checks all the bullets to be aggregated to the Kubernetes cluster APIs, and therefore using Custom Resource Definition (CRD) and the Operator pattern has always been an easy design decision.

-

By merging the definitions of service authN/authZ to the control plane, Authorino AuthConfig resources can be thought as extensions of the specs of the desired state of services regarding the data flow security. The Authorino custom controllers, built-in into the authorization service, are the agents that read from that desired state and reconcile the processes operating in the data plane.

-

Authorino is declarative and seamless for developers and cluster administrators managing the state of security of the applications running in the server, used to tools such as kubectl, the Kubernetes UI and its dashboards. Instead of learning about yet another configuration API format, Authorino users can jump straight to applying and editing YAML or JSON structures they already know, in a way that things such as spec, status, namespace and labels have the meaning they are expected to have, and docs are as close as kubectl explain. Moreover, Authorino does not pile up any other redundant layers of APIs, event-processing, RBAC, transformation and validation webhooks, etc. It is Kubernetes in its best.

-

In terms of scale, Authorino AuthConfigs should grow proportionally to the number of protected services, virtually limited by nothing but the Kubernetes API data storage, while namespace division and label selectors help adjust horizontally and keep distributed.

-

In other words, there are lots of benefits of using Kubernetes custom resources and custom controllers, and unless you are planning on bursting your server with more services than it can keep record, it is totally 👍 to store your AuthN/AuthZ configs as cluster API objects.

-
-
-Can I use Authorino for rate limiting? -

You can, but you shouldn't. Check out instead Limitador, for simple and efficient global rate limiting. Combine it with Authorino and Authorino's support for Envoy Dynamic Metadata for authenticated rate limiting.

-
-

Benchmarks

-

Configuration of the tests (Authorino features):

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Performance testIdentityMetadataAuthorizationResponse
ReconcileAuthConfigOIDC/JWTUserInfo, UMAOPA
(inline Rego)
-
AuthPipelineOIDC/JWT-JSON pattern-matching
(JWT claim check)
-
APIKeyAuthnAPI keyN/AN/AN/A
JSONPatternMatchingAuthzN/AN/AJSON pattern-matchingN/A
OPAAuthzN/AN/AOPA
(inline Rego)
N/A
-

Platform: linux/amd64
-CPU: Intel® Xeon® Platinum 8370C 2.80GHz
-Cores: 1, 4, 10

-

Results: -

ReconcileAuthConfig:
-
-        │   sec/op    │     B/op     │  allocs/op  │
-
-*         1.533m ± 2%   264.4Ki ± 0%   6.470k ± 0%
-*-4       1.381m ± 6%   264.5Ki ± 0%   6.471k ± 0%
-*-10      1.563m ± 5%   270.2Ki ± 0%   6.426k ± 0%
-geomean   1.491m        266.4Ki        6.456k
-
-AuthPipeline:
-
-        │   sec/op    │     B/op     │ allocs/op  │
-
-*         388.0µ ± 2%   80.70Ki ± 0%   894.0 ± 0%
-*-4       348.4µ ± 5%   80.67Ki ± 2%   894.0 ± 3%
-*-10      356.4µ ± 2%   78.97Ki ± 0%   860.0 ± 0%
-geomean   363.9µ        80.11Ki        882.5
-
-APIKeyAuthn:
-
-        │   sec/op    │    B/op      │ allocs/op  │
-
-*         3.246µ ± 1%   480.0 ± 0%     6.000 ± 0%
-*-4       3.111µ ± 0%   480.0 ± 0%     6.000 ± 0%
-*-10      3.091µ ± 1%   480.0 ± 0%     6.000 ± 0%
-geomean   3.148µ        480.0          6.000
-
-OPAAuthz vs JSONPatternMatchingAuthz:
-
-        │   OPAAuthz   │      JSONPatternMatchingAuthz       │
-        │    sec/op    │   sec/op     vs base                │
-
-*         87.469µ ± 1%   1.797µ ± 1%  -97.95% (p=0.000 n=10)
-*-4       95.954µ ± 3%   1.766µ ± 0%  -98.16% (p=0.000 n=10)
-*-10      96.789µ ± 4%   1.763µ ± 0%  -98.18% (p=0.000 n=10)
-geomean    93.31µ        1.775µ       -98.10%
-
-        │   OPAAuthz    │      JSONPatternMatchingAuthz      │
-        │     B/op      │    B/op     vs base                │
-
-*         28826.00 ± 0%   64.00 ± 0%  -99.78% (p=0.000 n=10)
-*-4       28844.00 ± 0%   64.00 ± 0%  -99.78% (p=0.000 n=10)
-*-10      28862.00 ± 0%   64.00 ± 0%  -99.78% (p=0.000 n=10)
-geomean    28.17Ki        64.00       -99.78%
-
-        │   OPAAuthz   │      JSONPatternMatchingAuthz      │
-        │  allocs/op   │ allocs/op   vs base                │
-
-*         569.000 ± 0%   2.000 ± 0%  -99.65% (p=0.000 n=10)
-*-4       569.000 ± 0%   2.000 ± 0%  -99.65% (p=0.000 n=10)
-*-10      569.000 ± 0%   2.000 ± 0%  -99.65% (p=0.000 n=10)
-geomean     569.0        2.000       -99.65%
-

-

Contributing

-

If you are interested in contributing to Authorino, please refer to the Developer's guide for info about the stack and requirements, workflow, policies and Code of Conduct.

-

Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.

-

License

-

FOSSA Status

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnshealthcheckprobe.yaml b/dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnshealthcheckprobe.yaml deleted file mode 100644 index 870d5193..00000000 --- a/dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnshealthcheckprobe.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kuadrant.io/v1alpha1 -kind: DNSHealthCheckProbe -metadata: - name: $NAME -spec: - port: 443 - hostname: test.com - address: 192.168.0.16 - path: /healthz - protocol: HTTPS - interval: 60s - additionalHeadersRef: - name: headers - failureThreshold: 5 - allowInsecureCertificate: True \ No newline at end of file diff --git a/dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnsrecord.yaml b/dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnsrecord.yaml deleted file mode 100644 index 588aecff..00000000 --- a/dev/dns-operator/config/samples/kuadrant.io_v1alpha1_dnsrecord.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: kuadrant.io/v1alpha1 -kind: DNSRecord -metadata: - labels: - app.kubernetes.io/name: dnsrecord - app.kubernetes.io/instance: dnsrecord-sample - app.kubernetes.io/part-of: dns-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: dns-operator - name: dnsrecord-sample -spec: - providerRef: - name: dns-provider-creds - endpoints: - - dnsName: dnsrecord-simple.example.com - recordTTL: 60 - recordType: A - targets: - - 52.215.108.61 - - 52.30.101.221 diff --git a/dev/dns-operator/config/samples/kustomization.yaml b/dev/dns-operator/config/samples/kustomization.yaml deleted file mode 100644 index 7d949a0e..00000000 --- a/dev/dns-operator/config/samples/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -## Append samples of your project ## -resources: -- kuadrant.io_v1alpha1_dnsrecord.yaml -- kuadrant.io_v1alpha1_dnshealthcheckprobe.yaml -#+kubebuilder:scaffold:manifestskustomizesamples diff --git 
a/dev/dns-operator/docs/RELEASE/index.html b/dev/dns-operator/docs/RELEASE/index.html deleted file mode 100644 index 884af2bb..00000000 --- a/dev/dns-operator/docs/RELEASE/index.html +++ /dev/null @@ -1,1487 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - RELEASE - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

RELEASE

- -

Release

-

New Major.Minor version

-
    -
  1. Create a new minor release branch from the HEAD of main: -
    git checkout -b release-0.2
    -
  2. -
  3. Run prepare release: -
    make prepare-release IMG_TAG=release-0.2 VERSION=0.2.0-dev CHANNELS=alpha REPLACES_VERSION=0.1.0
    -
  4. -
  5. Verify local changes, commit and push: -
    git add .
    -git commit -m "prepare-release: release-0.2"
    -git push upstream release-0.2
    -
  6. -
  7. -

    Verify that the build image workflow is triggered and completes for the new branch

    -
  8. -
  9. -

    Do any final testing and bug fixing against the release branch, see Verify OLM Deployment

    -
  10. -
  11. -

    Run prepare release for final version -

    make prepare-release VERSION=0.2.0 CHANNELS=stable REPLACES_VERSION=0.1.0
    -

    -
  12. -
  13. Verify local changes, commit, push and tag: -
    git add .
    -git commit -m "prepare-release: v0.2.0"
    -git tag v0.2.0
    -git push upstream release-0.2
    -git push upstream v0.2.0
    -
  14. -
  15. -

    Verify that the build release tag workflow is triggered and completes for the new tag

    -
  16. -
  17. -

    Verify the new version can be installed from the catalog image, see Verify OLM Deployment

    -
  18. -
  19. -

    Release to the community operator index catalogs.

    -
  20. -
-

New Patch version

-
    -
  1. Checkout minor release branch: -
    git checkout release-0.2
    -
  2. -
  3. Run prepare release: -
    make prepare-release VERSION=0.2.1 CHANNELS=stable REPLACES_VERSION=0.2.0
    -
  4. -
  5. Verify local changes, commit and push: -
    git add .
    -git commit -m "prepare-release: v0.2.1"
    -git tag v0.2.1
    -git push upstream release-0.2
    -git push upstream v0.2.1
    -
  6. -
  7. -

    Verify that the build release tag workflow is triggered and completes for the new tag

    -
  8. -
  9. -

    Verify the new version can be installed from the catalog image, see Verify OLM Deployment

    -
  10. -
  11. -

    Release to the community operator index catalogs.

    -
  12. -
-

Verify OLM Deployment

-
    -
  1. -

    Deploy the OLM catalog image: -

    make local-setup install-olm deploy-catalog
    -

    -
  2. -
  3. -

    Wait for deployment: -

    kubectl -n dns-operator-system wait --timeout=60s --for=condition=Available deployments --all
    -deployment.apps/dns-operator-controller-manager condition met
    -

    -
  4. -
  5. -

    Check the logs: -

    kubectl -n dns-operator-system logs -f deployment/dns-operator-controller-manager
    -

    -
  6. -
  7. -

    Check the version: -

    $ kubectl -n dns-operator-system get deployment dns-operator-controller-manager --show-labels
    -NAME                              READY   UP-TO-DATE   AVAILABLE   AGE     LABELS
    -dns-operator-controller-manager   1/1     1            1           5m42s   app.kubernetes.io/component=manager,app.kubernetes.io/created-by=dns-operator,
    -app.kubernetes.io/instance=controller-manager,app.kubernetes.io/managed-by=kustomize,app.kubernetes.io/name=deployment,app.kubernetes.io/part-of=dns-operator,
    -control-plane=dns-operator-controller-manager,olm.deployment-spec-hash=1jPe8AuMpSKHh51nnDs4j25ZgoUrKhF45EP0Wa,olm.managed=true,olm.owner.kind=ClusterServiceVersion,
    -olm.owner.namespace=dns-operator-system,olm.owner=dns-operator.v0.2.0-dev,operators.coreos.com/dns-operator.dns-operator-system=
    -

    -
  8. -
-

Community Operator Index Catalogs

- - - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/index.html b/dev/dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/index.html deleted file mode 100644 index 23c5b51e..00000000 --- a/dev/dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/index.html +++ /dev/null @@ -1,1357 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - List of issues - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

List of issues

- -

The idea

-

We now will constantly reconcile DNS records. The reasoning is that other controllers may override/change records in the DNS provider so there is a need to requeue the DNS Record from time to time even when no local changes are introduced.

-

Details

-

There are a few new fields on the DNS Record status:

-
    -
  • QueuedAt is a time when the DNS record was received for the reconciliation
  • -
  • ValidFor indicates the duration since the last reconciliation we consider data in the record to be valid
  • -
  • WriteCounter represents a number of consecutive write attempts on the same generation of the record. It is being reset to 0 when the generation changes or there are no changes to write.
  • -
-

There is an option to override the ValidFor and DefaultRequeueTime with valid-for and requeue-time flags respectively.

-

The DefaultRequeueTime is the duration between successful validation and the next reconciliation to ensure that the record is still up-to-date.

-

The ValidFor is used to determine if we should do a full reconciliation when we get the record. If the record is still valid we will only update finalizers and validate the record itself. It will not perform anything that involves a DNS provider.

-

DNS Record normal lifecycle

-

Once we enqueue the DNS record, controller will compile a list of changes to the DNS provider and will apply it. After this, the record is enqueued with the validationRequeueTime and the Ready condition will be marked as false with a message Awaiting Validation. When the record is received again and the controller ensures there are no changes needed (the ones applied are present in the DNS Provider) it sets the Ready condition to true and enqueues it with the defaultRequeueTime.

-

Upon deletion, the process will be similar. The controller will determine the changes needed to the DNS provider and will apply them. The record will be requeued with the validationRequeueTime. Once we receive it back and ensure that there are no changes needed for the DNS provider we remove the finalizer from the record.

-

The validationRequeueTime duration is randomized +/- 50%.

-

When things go south

-

If the record is received prematurely - the ValidFor + QueuedAt is more than the current time - we requeue it again for the ValidFor duration.

-

When we encounter an error during the reconciliation we will not requeue the record and will put in an appropriate error message in the log and on the record. In order for it to reconcile again there must be a change to the DNS Record CR.

-

It is possible for a user to mess with the timestamps field or the ValidFor field. Kubernetes will not let setting an invalid value to the timestamp fields. Once the timestamp fields are set manually it will trigger reconciliation since there is a change in the record CR. The only one that could impact the controller is the QueuedAt field and the controller will believe that to be the last time the record was reconciled. As for the ValidFor: since it is a simple string it is possible to set an incorrect value. If we fail to parse it we treat the ValidFor as 0. This means that the controller will believe that the information in the record is expired and will probe the DNS provider for an update. If a valid value is provided controller will obey it. Eventually, the controller will naturally enqueue the record and those values will be overridden.

-

In case the controller fails to retain changes in the DNS Provider: write are successful, but the validation fails again and the WriteCounter reaches the WriteCounterLimit we give up on the reconciliation. The appropriate message will be put under the Ready - false condition as well as in the logs of the controller. The reconciliation will resume once the generation of the DNS Record is changed.

-

Metrics

-

There is a metric emitted from the controller: dns_provider_write_counter. It reflects the WriteCounter field in the status of the record.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/dns-operator/docs/reference/dnsrecord/index.html b/dev/dns-operator/docs/reference/dnsrecord/index.html deleted file mode 100644 index 99fd8975..00000000 --- a/dev/dns-operator/docs/reference/dnsrecord/index.html +++ /dev/null @@ -1,1639 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - The DNSRecord Custom Resource Definition (CRD) - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

The DNSRecord Custom Resource Definition (CRD)

- -

DNSRecord

- - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeRequiredDescription
specDNSRecordSpecYesThe specification for DNSRecord custom resource
statusDNSRecordStatusNoThe status for the custom resource
-

DNSRecordSpec

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeRequiredDescription
ownerIDStringNoUnique string used to identify the owner of this record. If unset an ownerID will be generated based on the record UID
rootHostStringYesSingle root host of all endpoints in a DNSRecord
providerRefProviderRefYesReference to a DNS Provider Secret
endpoints[]ExternalDNS EndpointNoEndpoints to manage in the dns provider
healthCheckHealthCheckSpecNoHealth check configuration
-

ProviderRef

- - - - - - - - - - - - - - - - - -
FieldTypeRequiredDescription
nameStringYesName of a dns provider secret
-

HealthCheckSpec

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeRequiredDescription
endpointStringYesEndpoint is the path to append to the host to reach the expected health check
portNumberYesPort to connect to the host on
protocolStringYesProtocol to use when connecting to the host, valid values are "HTTP" or "HTTPS"
failureThresholdNumberYesFailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy
-

DNSRecordStatus

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescription
observedGenerationStringNumber of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec
conditions[]Kubernetes meta/v1.ConditionList of conditions that define the status of the resource
queuedAtKubernetes meta/v1.TimeQueuedAt is a time when DNS record was received for the reconciliation
validForStringValidFor indicates duration since the last reconciliation we consider data in the record to be valid
writeCounterNumberWriteCounter represent a number of consecutive write attempts on the same generation of the record
endpoints[]ExternalDNS EndpointEndpoints are the last endpoints that were successfully published by the provider
healthCheckHealthCheckStatusHealth check status
ownerIDStringUnique string used to identify the owner of this record
-

HealthCheckStatus

- - - - - - - - - - - - - - - - - - - - -
FieldTypeDescription
conditions[]Kubernetes meta/v1.ConditionList of conditions that define that status of the health checks
probes[]HealthCheckStatusProbeHealth check Probe status
-

HealthCheckStatusProbe

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
FieldTypeDescription
idStringThe health check id
ipAddressStringThe ip address being monitored
hostStringThe host being monitored
syncedBooleanSynced
conditions[]Kubernetes meta/v1.ConditionList of conditions that define that status of the probe
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/dns-operator/index.html b/dev/dns-operator/index.html deleted file mode 100644 index b79e6bf0..00000000 --- a/dev/dns-operator/index.html +++ /dev/null @@ -1,1667 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - DNS Operator - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - - - Skip to content - - -
-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

DNS Operator

-

FOSSA Status

-

The DNS Operator is a kubernetes based controller responsible for reconciling DNS Record custom resources. It interfaces with cloud DNS providers such as AWS, Google and Azure to bring the DNS zone into the state declared in these CRDs. -One of the key use cases the DNS operator solves, is allowing complex DNS routing strategies such as Geo and Weighted to be expressed. This allows you to leverage DNS as the first layer of traffic management. These strategies increase in value as you works across multiple clusters. DNS operator can be deployed to multiple cluster and coordinate on a given zone allowing you to use a shared domain name to balance traffic based on your requirements.

-

Getting Started

-

Pre Setup

-

Add DNS provider configuration

-

NOTE: You can optionally skip this step but at least one DNS Provider Secret will need to be configured with valid credentials to use the DNS Operator.

-
AWS Provider (Route53)
-

make local-setup-aws-clean local-setup-aws-generate AWS_ACCESS_KEY_ID=<My AWS ACCESS KEY> AWS_SECRET_ACCESS_KEY=<My AWS Secret Access Key>
-
-More details about the AWS provider can be found here

-
GCP Provider
-

make local-setup-gcp-clean local-setup-gcp-generate GCP_GOOGLE_CREDENTIALS='<My GCP Credentials.json>' GCP_PROJECT_ID=<My GCP PROJECT ID>
-
-More details about the GCP provider can be found here

-
AZURE Provider
-
make local-setup-azure-clean local-setup-azure-generate KUADRANT_AZURE_CREDENTIALS='<My Azure Credentials.json>'
-
-

Info on generating service principal credentials here

-

Get your resource group ID like so: -

az group show --resource-group <resource group name> | jq ".id" -r
-

-

Also give traffic manager contributor role: -

az role assignment create --role "Traffic Manager Contributor" --assignee $EXTERNALDNS_SP_APP_ID --scope <RESOURCE_GROUP_ID>
-

-

Getting the zone ID can be achieved using the below command: -

az network dns zone show --name <my domain name> --resource-group <my resource group> --query "{id:id,domain:name}"
-

-

Running controller locally (default)

-
    -
  1. -

    Create local environment(creates kind cluster) -

    make local-setup
    -

    -
  2. -
  3. -

    Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):

    -
  4. -
-
make run
-
-

Running controller on the cluster

-
    -
  1. -

    Create local environment(creates kind cluster) -

    make local-setup DEPLOY=true
    -

    -
  2. -
  3. -

    Verify controller deployment -

    kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system
    -

    -
  4. -
-

Running controller on existing cluster

-

You’ll need a Kubernetes cluster to run against. You can use KIND to get a local cluster for testing, or run against a remote cluster. -Note: Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster kubectl cluster-info shows).

-
    -
  1. -

    Apply Operator manifests -

    kustomize build config/default | kubectl apply -f -
    -

    -
  2. -
  3. -

    Verify controller deployment -

    kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system
    -

    -
  4. -
-

Development

-

E2E Test Suite

-

The e2e test suite can be executed against any cluster running the DNS Operator with configuration added for any supported provider.

-
make test-e2e TEST_DNS_ZONE_DOMAIN_NAME=<My domain name> TEST_DNS_PROVIDER_SECRET_NAME=<My provider secret name> TEST_DNS_NAMESPACES=<My test namespace(s)>
-
- - - - - - - - - - - - - - - - - - - - - -
Environment VariableDescription
TEST_DNS_PROVIDER_SECRET_NAMEName of the provider secret to use. If using local-setup provider secrets zones, one of [dns-provider-credentials-aws; dns-provider-credentials-gcp;dns-provider-credentials-azure]
TEST_DNS_ZONE_DOMAIN_NAMEThe Domain name to use in the test. Must be a zone accessible with the (TEST_DNS_PROVIDER_SECRET_NAME) credentials with the same domain name
TEST_DNS_NAMESPACESThe namespace(s) where the provider secret(s) can be found
-

Modifying the API definitions

-

If you are editing the API definitions, generate the manifests such as CRs or CRDs using:

-
make manifests
-
-

NOTE: Run make --help for more information on all potential make targets

-

More information can be found via the Kubebuilder Documentation

-

Logging

-

Logs are following the general guidelines:

-
    -
  • logger.Info() describe a high-level state of the resource such as creation, deletion and which reconciliation path was taken.
  • -
  • logger.Error() describe only those errors that are not returned in the result of the reconciliation. If error is occurred there should be only one error message.
  • -
  • logger.V(1).Info() debug level logs to give information about every change or event caused by the resource as well as every update of the resource.
  • -
-

The --zap-devel argument will enable debug level logs for the output. Otherwise, all V() logs are ignored.

-

Common metadata

-

Not exhaustive list of metadata for DNSRecord controller:

-
    -
  • level - logging level. Values are: info,debug or error
  • -
  • ts - timestamp
  • -
  • logger - logger name
  • -
  • msg
  • -
  • controller and controllerKind - controller name, and it's kind respectively to output the log
  • -
  • DNSRecord - name and namespace of the DNS Record CR that is being reconciled
  • -
  • reconcileID
  • -
  • ownerID - ID the of owner of the DNS Record
  • -
  • txtPrefix/txtSuffix - prefix and suffix of the TXT record in provider.
  • -
  • zoneEndpoints - endpoints that exist in the provider
  • -
  • specEdnoinds - endpoints defined in the spec
  • -
  • statusEndpoints - endpoints that were processed previously
  • -
-
-

Note that not all the metadata values are present at each of the logs statements.

-
-

Examples

-

To query logs locally you can use jq. For example: -Retrieve logs by -

kubectl get deployments -l app.kubernetes.io/part-of=dns-operator -A
-
-NAMESPACE             NAME                              READY 
-dns-operator-system   dns-operator-controller-manager   1/1   
-
-And query them. For example: -
kubectl logs -l control-plane=dns-operator-controller-manager -n dns-operator-system --tail -1 | sed '/^{/!d' | jq 'select(.controller=="dnsrecord" and .level=="info")'
-
-or -
kubectl logs -l control-plane=dns-operator-controller-manager -n dns-operator-system --tail -1 | sed '/^{/!d' | jq 'select(.controller=="dnsrecord" and .DNSRecord.name=="test" and .reconcileID=="2be16b6d-b90f-430e-9996-8b5ec4855d53")' | jq '.level, .msg, .zoneEndpoints, .specEndpoints, .statusEndpoints '
-
-You could use selector in the jq with and/not/or to restrict.

-

License

-

Copyright 2024.

-

Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at

-
http://www.apache.org/licenses/LICENSE-2.0
-
-

Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License.

-

FOSSA Status

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/install-olm/index.html b/dev/install-olm/index.html index 6f5aa566..ec434b59 100644 --- a/dev/install-olm/index.html +++ b/dev/install-olm/index.html @@ -2082,7 +2082,7 @@

Set up observability (OpenShift Onl kubectl apply -k config/observability/openshift/grafana

Create the example dashboards in Grafana

-
kubectl apply -k https://github.com/Kuadrant/kuadrant-operator//examples/dashboards?ref=v1.0.1
+
kubectl apply -k https://github.com/Kuadrant/kuadrant-operator/examples/dashboards?ref=v1.0.1
 

Access the Grafana UI, using the default user/pass of root/secret. You should see the example dashboards in the 'monitoring' folder. diff --git a/dev/kuadrant-operator/config/samples/kuadrant_v1_authpolicy.yaml b/dev/kuadrant-operator/config/samples/kuadrant_v1_authpolicy.yaml deleted file mode 100644 index 48311305..00000000 --- a/dev/kuadrant-operator/config/samples/kuadrant_v1_authpolicy.yaml +++ /dev/null @@ -1,18 +0,0 @@ ---- -apiVersion: kuadrant.io/v1 -kind: AuthPolicy -metadata: - name: authpolicy-sample -spec: - targetRef: - group: gateway.networking.k8s.io - kind: HTTPRoute - name: toystore - rules: - authentication: - "apikey": - apiKey: - selector: {} - credentials: - authorizationHeader: - prefix: APIKEY diff --git a/dev/kuadrant-operator/config/samples/kuadrant_v1_dnspolicy.yaml b/dev/kuadrant-operator/config/samples/kuadrant_v1_dnspolicy.yaml deleted file mode 100644 index 0f669a71..00000000 --- a/dev/kuadrant-operator/config/samples/kuadrant_v1_dnspolicy.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: kuadrant.io/v1 -kind: DNSPolicy -metadata: - name: dnspolicy-sample -spec: - targetRef: - name: example-gateway - group: gateway.networking.k8s.io - kind: Gateway - healthCheck: - protocol: HTTP - providerRefs: - - name: "provider-ref" diff --git a/dev/kuadrant-operator/config/samples/kuadrant_v1_ratelimitpolicy.yaml b/dev/kuadrant-operator/config/samples/kuadrant_v1_ratelimitpolicy.yaml deleted file mode 100644 index f5f6e382..00000000 --- a/dev/kuadrant-operator/config/samples/kuadrant_v1_ratelimitpolicy.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: kuadrant.io/v1 -kind: RateLimitPolicy -metadata: - name: ratelimitpolicy-sample -spec: - targetRef: - group: gateway.networking.k8s.io - kind: HTTPRoute - name: toystore - limits: - toys: - rates: - - limit: 50 - window: 1m diff --git a/dev/kuadrant-operator/config/samples/kuadrant_v1_tlspolicy.yaml b/dev/kuadrant-operator/config/samples/kuadrant_v1_tlspolicy.yaml deleted file mode 100644 
index 038327f0..00000000 --- a/dev/kuadrant-operator/config/samples/kuadrant_v1_tlspolicy.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: kuadrant.io/v1 -kind: TLSPolicy -metadata: - name: tlspolicy-sample -spec: - targetRef: - name: example-gateway - group: gateway.networking.k8s.io - kind: Gateway - issuerRef: - group: cert-manager.io - kind: ClusterIssuer - name: self-signed-ca diff --git a/dev/kuadrant-operator/config/samples/kuadrant_v1beta1_kuadrant.yaml b/dev/kuadrant-operator/config/samples/kuadrant_v1beta1_kuadrant.yaml deleted file mode 100644 index 29de2d9a..00000000 --- a/dev/kuadrant-operator/config/samples/kuadrant_v1beta1_kuadrant.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kuadrant.io/v1beta1 -kind: Kuadrant -metadata: - name: kuadrant-sample -spec: {} diff --git a/dev/kuadrant-operator/config/samples/kustomization.yaml b/dev/kuadrant-operator/config/samples/kustomization.yaml deleted file mode 100644 index 2670b77d..00000000 --- a/dev/kuadrant-operator/config/samples/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -## Append samples you want in your CSV to this file as resources ## -resources: -- kuadrant_v1beta1_kuadrant.yaml -- kuadrant_v1_authpolicy.yaml -- kuadrant_v1_ratelimitpolicy.yaml -- kuadrant_v1_dnspolicy.yaml -- kuadrant_v1_tlspolicy.yaml -#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/dev/kuadrant-operator/doc/images/kuadrant-architecture.svg b/dev/kuadrant-operator/doc/images/kuadrant-architecture.svg deleted file mode 100644 index 43e68124..00000000 --- a/dev/kuadrant-operator/doc/images/kuadrant-architecture.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/install/install-make/index.html b/dev/kuadrant-operator/doc/install/install-make/index.html deleted file mode 100644 index 0825b902..00000000 --- a/dev/kuadrant-operator/doc/install/install-make/index.html +++ /dev/null @@ -1,1395 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Installing 
Kuadrant via make targets - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

-
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Installing Kuadrant via make targets

-

Overview

-

The following doc will show you how to install the Kuadrant Operator using make targets in the Kuadrant operator repo. What will be installed is Istio, Kubernetes Gateway API and Kuadrant itself.

-
-

Note: In production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.

-
-

Pre-requisites

- -

Setup

-

Clone the project: -

git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator
-

-

Setup the environment (This will also create a kind cluster. If your using Pod man use the env var CONTAINER_ENGINE=podman with the make target below.): -

make local-setup
-

-

Request an instance of Kuadrant: -

kubectl -n kuadrant-system apply -f - <<EOF
-apiVersion: kuadrant.io/v1beta1
-kind: Kuadrant
-metadata:
-  name: kuadrant
-spec: {}
-EOF
-

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/overviews/development/index.html b/dev/kuadrant-operator/doc/overviews/development/index.html deleted file mode 100644 index aca64566..00000000 --- a/dev/kuadrant-operator/doc/overviews/development/index.html +++ /dev/null @@ -1,1869 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Development Guide - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Development Guide

-

Technology stack required for development

- -

Build

-
make build
-
-

Deploy on local kubernetes cluster

-

Run local Kubernetes cluster using Docker container using Kind and deploy kuadrant operator (and all dependencies) in a single command.

-
make local-setup
-
-

The make local-setup target accepts the following variables:

- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
GATEWAYAPI_PROVIDERGatewayAPI provider name. Accepted values: [istio | envoygateway]istio
-

Run as a local process

-

Run local Kubernetes cluster using Docker container using Kind and deploy all dependencies in a single command.

-
make local-env-setup
-
-

The make local-env-setup target accepts the following variables:

- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
GATEWAYAPI_PROVIDERGatewayAPI provider name. Accepted values: [istio | envoygateway]istio
-

Then, run the operator locally

-
make run
-
-

Deploy on existing kubernetes cluster

-

Requirements:

-
    -
  • Active session open to the kubernetes cluster.
  • -
  • GatewayAPI installed
  • -
  • GatewayAPI provider installed. Currently only Istio and EnvoyGateway supported.
  • -
  • Cert Manager installed
  • -
-

Before running the kuadrant operator, some dependencies needs to be deployed.

-
make install
-make deploy-dependencies
-
-

Then, deploy the operator

-
make deploy
-
-

Deploy kuadrant operator using OLM

-

You can deploy kuadrant using OLM just running few commands. -No need to build any image. Kuadrant engineering team provides latest and -release version tagged images. They are available in -the Quay.io/Kuadrant image repository.

-

Create kind cluster

-
make kind-create-cluster
-
-

Deploy OLM system

-
make install-olm
-
-

Deploy kuadrant using OLM. The make deploy-catalog target accepts the following variables:

- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
CATALOG_IMGKuadrant operator catalog image URLquay.io/kuadrant/kuadrant-operator-catalog:latest
-
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]
-
-

Build custom OLM catalog

-

If you want to deploy (using OLM) a custom kuadrant operator, you need to build your own catalog. -Furthermore, if you want to deploy a custom limitador or authorino operator, you also need -to build your own catalog. The kuadrant operator bundle includes the authorino or limtador operator -dependency version, hence using other than latest version requires a custom kuadrant operator -bundle and a custom catalog including the custom bundle.

-

Build kuadrant operator bundle image

-

The make bundle target accepts the following variables:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault valueNotes
IMGKuadrant operator image URLquay.io/kuadrant/kuadrant-operator:latestTAG var could be use to build this URL, defaults to latest if not provided
VERSIONBundle version0.0.0
LIMITADOR_OPERATOR_BUNDLE_IMGLimitador operator bundle URLquay.io/kuadrant/limitador-operator-bundle:latestLIMITADOR_OPERATOR_VERSION var could be used to build this, defaults to latest if not provided
AUTHORINO_OPERATOR_BUNDLE_IMGAuthorino operator bundle URLquay.io/kuadrant/authorino-operator-bundle:latestAUTHORINO_OPERATOR_VERSION var could be used to build this, defaults to latest if not provided
DNS_OPERATOR_BUNDLE_IMGDNS operator bundle URLquay.io/kuadrant/dns-operator-bundle:latestDNS_OPERATOR_BUNDLE_IMG var could be used to build this, defaults to latest if not provided
RELATED_IMAGE_WASMSHIMWASM shim image URLoci://quay.io/kuadrant/wasm-shim:latestWASM_SHIM_VERSION var could be used to build this, defaults to latest if not provided
RELATED_IMAGE_CONSOLEPLUGINConsolePlugin image URLquay.io/kuadrant/console-plugin:latest
CHANNELSBundle channels used in the bundle, comma separatedalpha
DEFAULT_CHANNELThe default channel used in the bundlealpha
-
    -
  • Build the bundle manifests
  • -
-
make bundle [IMG=quay.io/kuadrant/kuadrant-operator:latest] \
-            [VERSION=0.0.0] \
-            [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \
-            [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \
-            [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest] \
-            [RELATED_IMAGE_WASMSHIM=oci://quay.io/kuadrant/wasm-shim:latest] \
-            [RELATED_IMAGE_CONSOLEPLUGIN=quay.io/kuadrant/console-plugin:latest] \
-            [CHANNELS=alpha] \
-            [DEFAULT_CHANNEL=alpha]
-
-
    -
  • Build the bundle image from the manifests
  • -
- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
BUNDLE_IMGKuadrant operator bundle image URLquay.io/kuadrant/kuadrant-operator-bundle:latest
-
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]
-
-
    -
  • Push the bundle image to a registry
  • -
- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
BUNDLE_IMGKuadrant operator bundle image URLquay.io/kuadrant/kuadrant-operator-bundle:latest
-
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]
-
-

Frequently, you may need to build custom kuadrant bundle with the default (latest) Limitador and -Authorino bundles. These are the example commands to build the manifests, build the bundle image -and push to the registry.

-

In the example, a new kuadrant operator bundle version 0.8.0 will be created that references -the kuadrant operator image quay.io/kuadrant/kuadrant-operator:v0.5.0 and latest Limitador and -Authorino bundles.

-
# manifests
-make bundle IMG=quay.io/kuadrant/kuadrant-operator:v0.5.0 VERSION=0.8.0
-
-# bundle image
-make bundle-build BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle
-
-# push bundle image
-make bundle-push BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle
-
-

Build custom catalog

-

The catalog's format will be File-based Catalog.

-

Make sure all the required bundles are pushed to the registry. It is required by the opm tool.

-

The make catalog target accepts the following variables:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
BUNDLE_IMGKuadrant operator bundle image URLquay.io/kuadrant/kuadrant-operator-bundle:latest
LIMITADOR_OPERATOR_BUNDLE_IMGLimitador operator bundle URLquay.io/kuadrant/limitador-operator-bundle:latest
AUTHORINO_OPERATOR_BUNDLE_IMGAuthorino operator bundle URLquay.io/kuadrant/authorino-operator-bundle:latest
DNS_OPERATOR_BUNDLE_IMGDNS operator bundle URLquay.io/kuadrant/dns-operator-bundle:latest
DEFAULT_CHANNELCatalog default channelalpha
-
make catalog [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest] \
-            [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \
-            [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \
-            [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest] \
-            [DEFAULT_CHANNEL=alpha]
-
-
    -
  • Build the catalog image from the manifests
  • -
- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
CATALOG_IMGKuadrant operator catalog image URLquay.io/kuadrant/kuadrant-operator-catalog:latest
-
make catalog-build [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]
-
-
    -
  • Push the catalog image to a registry
  • -
-
make catalog-push [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]
-
-

You can try out your custom catalog image following the steps of the -Deploy kuadrant operator using OLM section.

-

Cleaning up

-
make local-cleanup
-
-

Run tests

-

Unittests

-
make test-unit
-
-

Optionally, add TEST_NAME makefile variable to run specific test

-
make test-unit TEST_NAME=TestLimitIndexEquals
-
-

or even subtest

-
make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal
-
-

Integration tests

-

Multiple controller integration tests are defined

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Golang packageRequired environmentMakefile env setup targetMakefile test run target
github.com/kuadrant/kuadrant-operator/tests/bare_k8sno gateway provider, no GatewayAPI CRDs. Just Kuadrant API and Kuadrant dependencies.make local-k8s-env-setupmake test-bare-k8s-integration
github.com/kuadrant/kuadrant-operator/tests/gatewayapino gateway provider. GatewayAPI CRDs, Kuadrant API and Kuadrant dependencies.make local-gatewayapi-env-setupmake test-gatewayapi-env-integration
github.com/kuadrant/kuadrant-operator/controllersat least one gatewayapi provider. It can be any: istio, envoygateway, ...make local-env-setup GATEWAYAPI_PROVIDER=[istio \| envoygateway] [ISTIO_INSTALL_SAIL=false] (Default istio)make test-integration GATEWAYAPI_PROVIDER=[istio \| envoygateway] (Default istio)
github.com/kuadrant/kuadrant-operator/tests/istioGatewayAPI CRDs, Istio, Kuadrant API and Kuadrant dependencies.make local-env-setup GATEWAYAPI_PROVIDER=istio [ISTIO_INSTALL_SAIL=false]make test-istio-env-integration
github.com/kuadrant/kuadrant-operator/tests/envoygatewayGatewayAPI CRDs, EnvoyGateway, Kuadrant API and Kuadrant dependencies.make local-env-setup GATEWAYAPI_PROVIDER=envoygatewaymake test-envoygateway-env-integration
-

Lint tests

-
make run-lint
-
-

(Un)Install Kuadrant CRDs

-

You need an active session open to a kubernetes cluster.

-

Remove CRDs

-
make uninstall
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/overviews/logging/index.html b/dev/kuadrant-operator/doc/overviews/logging/index.html deleted file mode 100644 index e01e5c73..00000000 --- a/dev/kuadrant-operator/doc/overviews/logging/index.html +++ /dev/null @@ -1,1343 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Logging - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Logging

-

The kuadrant operator outputs 3 levels of log messages: (from lowest to highest level)

-
    -
  1. debug
  2. -
  3. info (default)
  4. -
  5. error
  6. -
-

info logging is restricted to high-level information. Actions like creating, deleteing or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.

-

Only debug logging will include processing details.

-

To configure the desired log level, set the environment variable LOG_LEVEL to one of the supported values listed above. Default log level is info.

-

Apart from log level, the operator can output messages to the logs in 2 different formats:

-
    -
  • production (default): each line is a parseable JSON object with properties {"level":string, "ts":int, "msg":string, "logger":string, extra values...}
  • -
  • development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\t<log-level>\t<logger>\t<message>\t{extra-values-as-json}
  • -
-

To configure the desired log mode, set the environment variable LOG_MODE to one of the supported values listed above. Default log level is production.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/proposals/images/authpolicy-control-structure.png b/dev/kuadrant-operator/doc/proposals/images/authpolicy-control-structure.png deleted file mode 100644 index ee6e621ef585aa959d1307102111ccc3fe167529..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 60035 zcmYIw2Rzm7`~M+(C&>z-?7cUMD3QwETiILoh-5^ztPrwS_THN$D};oQoxT3o$@BgF zpVxUkbs z&GD|nGc5Se6U!tR-jh4YYB)W$HFI(`bTCDj+t^y0vO5|(n3~!+TG%>mUTKnmi`bDD z$vBuAIz6|wp?&t;+7zK~YC_8+O#8suhL(qmhliF+K!lf1gom5<@grJ=r_XBdso;`T#?kapA=!#k_*3+$73@D20|uZKRy z8q?U1{(XSe-o%t!hWat_UHf}ME4OQB-Ewpq+HosssQQY%mC5(N(5-aGqJ{hM?M&S> zi{@!Cu3wEtcj*G`pk(J%i_i)FNOC&M59nmgDuW~whu2rgDt2pp3|}ILwY)jQqDD(x|ZEYt+C0iWv zZxM|6WiSx$1hqPva&xgU390X?s*XnWar5x_?=BBz*3~89q5NC9VAOOrsrKaVGQ6A- z?3|G|;go_#PEI5#DJi#A&i$U@Nt>H9%H6+jKO}x&H)*2D@kZ=&eyrFqzXGO^*4VY3 z9fz>DZ-2(B>m|nPUlQIWV@fBZBA7B4y@eBgMtQ#I&Dl zM&T0>Xx%)B*J8MQ22)IQlxtL~s`D;&+#%KliTRYQ?R-I6oPl zntJ#;X&R#B0*N4b33 za+nh>2~K);HpbrG-r-yT;e)uDoCys!QqQdx%umI|_a8mNznlIzSN8HDn1p|!e(qEu zLFeb^7joO89ZVK}`of+xJYLToyDNg`J_B(&@0H8P9f?FK2{L>xv9`80JUOWeSEaIT2d=H#&CmH}_B@B+rGs4_tk2ypw-bcIDr4lrDXs zo#PM6%d-QYEWRaRqmUtZ4P!mdBJ^?B1jSu{Hg{Xqq`h>iWopNP0)tp>h?#7vlFogX#!aAbh zDhmq>+_s_WH8Jif?DB^U>;o*|o~GaBO!mGIG*d`LF`VwiH{G+ew1i>eK37&6y?BAs zVo>re3LJiJ3yFw`eEI&p)~x)zKadtn$o?l9d_ok_^^l$5Uvy~CxQo%u zazD=Wbib;pBVFe81NPpsr7!6+hwHXJp>{$#FH|mfi}X4VLQS}#p<$xjTsgNxRPNTq^7V);wi zNd4P{f)`Bqzj4424Lv=lw^ErxxRM4+vVs;rxL+4jV z^FI|9%IfOsDwsZh{=8+T&b0^YpH~`t1v-AgPtp*tKuytJv%!OQ+))R^#N3dc=bX`%1!rNpHL5=7NfM<4V(Bl&n%q%O5 za=5qw@#0f{e&#OXa_cG{6K%0blgL73k|z3)$8wR#M1>Rj&%z+Ki6ZhLc83{oVHJ;u z!boEK6UOAG#Bb1Q>*|JQXCv;>p-2-wR#goQz$9!nD&2CpcJ12ljSUP$vXFgiNu!Rb zQ7IV)1_oq2f4DTX0~IzQJ3HHds^eEmLV_OoSy)8G0&H)vTqfj|fq?-cS?)|LS|SAEb4A51A}rXg7PIpC zt@$>)x*7DU=*#6(kSFe=uA50ki?p(|Oe+8L?b|bADPpOeli3Sz2Q#xEpCaA5xX)UJ zr;R-(KP7SfBkSF^Gm6CO(!5i=l+q+p&>{{A_>2 z;QQ;%HAbiL`}glhtE#F7ZK6b4y?D{o;$(&-+J|CGiozH#Y?f@^f-95l)QwX<{#lIUD{GE8e-rp;sTA 
zm`Lsk@q_H%V{f`qdv&>oYaApWiDib$+z8NGB-XadLHC_|+2~ zH00vy3Wh=)9UYAfzgvs(`WcYfU{dVt?D7o6LC)}CKj&Hks+UteA{FmIw8xbuf~Yk1 z?E|Dx@Q&bnbQZR@Idg{paz>nBR;5&M4YLWRl(t^om1ea`2oLz`yOU>-q-XMk%+v=) z)V~+&)^(~cPFD2v^n9enQb6>yw6uhY++k!qdQiXLI7}b~+0n8iF$)T?V>p2vICc+Ox-` z7m0;qG$KyjC2V!np&f_(p&kCcy$=|O--PKRmQIFTun}9;$WPil&;4n)cD}oZ862c} zA3tw1Sb{mjx}hR|xUUt4olK@0uTznc&5cj&K6>;hXh?{P0FaBD7=dOS??!Z!_KYX+{G*FZeZYRj=#FO*#%a{!pd5c z{-My^(^D+pwo&D#oKA!Lb;QmC(>&Fs<<{;G1`g%A_;Ori{Y?2xIE9D*09CP&K98zlevHFx$RLQ$HwX*bx(D?ZI)9dOeZ5#GT32utC&-i_P>R#p!rd(c2@q9KZ zw70ic*p+1|V0bW~$E5Y|Up{cNa9EDhf!S01!4%Qg@(fWbW4{^9E+I!O8ieVv$4qF5 z!zIC{9#_QI*mWHnemSlyA98Z;nJ!!L$X(LZOf)3ox=eBp{PBFRhh;E9MEkwA z7BIdRIKGCC;N#;n7%$QlalNq=H8L|34lr`@Z5ch6!m_fjTyui3C}b!QVo$X!Em@nqm-@+( zSA~Vm9d0kcvV8jVY3`swkMo&o;OL7i-AlNtCXqEL^PnHUZ!K*or=~_&n4douBIwBC z-|?j>%*cYgya&`GPE3>EzO_3ZsH>?_EDesQV*7rczK~l4_$Vm}ne4WKTj?f!NH7IKN{BTc&?p? zV)S)k8+@74w(rZANE;oEBYB1=@8A4eJv44YCJci>;|eXq@=1g8$+>N}L!639t3j*` z0O*j5Z@#@L8U%4}hCmwf3IM}sexY%I_m!2T>cx8eI%nf?CzC_oLD7uLKMSjV-1YI1 zbge7ZV6(8cPI25zn|G+OJ3Dqh{e54uwYeEFv~SFlxD;~$!3Oa}l(ZlvHZ~TnkdsK;(K1R8S_lfwUpYV$}Bm3PaK{`%O>y z(cfagtek_kastW>*bp`$CNeI9xkt;}T{K;z`RevR+p#y6u7!3$f{j&wFbq#4w$n{d zMn=|LX+0&s!iaQqqfu!{(j-X(7!b!cHZ}?{q$<}s?h(DpV7id9>5#k;_>CsC1BwMC zetS78dl?O<(`n~a6^)JQm2DM%-B(s7q@bjH$d2(J`a+IK2GHT{TLMKzMYg=8p%=*F zhn}8ZES5!@sQrbs3d!HSy_KR#D}jl17YAoHI&h(wlPH%vFZn`l}Lh@@Vq zw6D9gSqaFH$fmA9wkL!~3CV!LNII|{|EnxMuhw@1gu=tezdmY5KSn@u<=?mv5qjKt zIN$l#TtL|Zt3peL=djpRSct#vMg=2BF|@3^$V4%`YBPjeVQIlQ(uUMyYE@O)dp*?X z%Gfv)=dVR{jZYKcs)%(XG!`L&z;aRv*?sty!DdN7B?POcgaKs&YpR>rTPa!uZPw}X z6JlAFdqPwg2q$OfThtMpey{T5;ady7+TR!PT!Q1UY29V@pPaAeUvogwx`GxU4TzDJ zH1s++#Drj)Q)8^&1zNlELxckRW=rBD>F8?4eFC#x6w~TFV zLzIdDkmH(v@D)FVARVAH=b&IxsMy2hYIaycGUn;H-=gXt&SvWwHY?g43KCE-*zBjkC=<=Zb&Ica5KY1#K(X(=Q-1vi6pV{d=|4hjP$1;x@# zT-$3x^LqN5`;Dcgr3I!5Y?Bgl)_p-UHp-~-l@%4YyE;3g{Ao>YuE{ePO*82C8CiX0E{?)w~);7;YB~ys^y{37IF#mr3W6P=cMHc1m zdHMOy#*zX8on5puTU3;k3uhx=zBH#Ly?_5efRE2aj29*l9`<)ftLDZ-NkB9B!((G{ 
zFd=}TJyEPb`sR6sg%ja@?S&PDSv4N5MGjqN4w7tI%|f-d6D!jxD*?*hNE?JjtR4Nr zEB0s4V^e_~kTHFjFGj&!w!*r5RC!xh(=5c|_yp)P^;L($x$;K&P5PkG7mvQL@7H&+ z%pkt#l}}oc2ZgI7B}`Ac1Sc3qd8ak>S#%}uLHR=7iMQsl?)inKNiKi-d(DRwRz(c@ zfLut%n!37$l!S$J)_h8T8eP0gh2smU3k^Rt3>^=2Z>79{{}Y45k~3d438iPFZpNnV z1Q&qLID{hzK{!~ci;%;J(EQ4nfWKhrS<0ls37m5h!I%85C`BEC3FfUCpqS|6(5UI7 zVRcs^&3(yduoqQ7%^V_?bK`1%(|c2quf3m`aGt5kIy>`k26#?I5v)c0y+Ws|x`>&- zh;wMVXK76Q8JrEm(1%^~yXlZ}xMJd{*_NG*`Bi!gro>_H@nT3;8Mf8B9i2R&16!f%_g= z>+}LNVP($~2h3jGy>*Z7s>E1FhUUal_mQRiI!%ZV?xH=#_#oVGbgejWLI^M0lZEYA-@$MF1jrF0>5z+CPKm}uMt z+EjBHRkTAR-{izo0u2=fo(x{P`b&ADQ{qsEudcqpBP3kI!&$F}0RZVOC$r||?JSq? zc^x~C6~V9n7_(vud2%ZA;D)KbpT=0pdiCJoU{u|F%+Ha@N#W&6c1Yh3(1ra;0DR%f zFc1%WmDO;Dg@@msv8ghQq;q^OuYS9%2EnZ!>nZWweB_FIVkm~AtlVE~Sv;kXkuNV0~OfJBJKAEjb08U%NP` z>Y{pzXH>&}@k9lnYb@_7xL&-lwH*(SuhfCMLc8d%_efkKI3`51HeWsK8LK}Idlgq# zxY-=NNGXPl(SU)H7NaHvi+lJG7fsr?Z;_=!{-GQjsSLdT{{1^%)fHsW60wryr6QoL zRR@YLEY>_z^^vR!n09&|3*0lc9yDMU&^GToU?w9*{d#1jUz?-y$&cx2vE^C)^}Jg6 zo)&A`)W2}6{JLt@R5*|wK#2fvEvw?D^;8ak8&Hq7p)deb$jge3b(c&?kzQV2SQr7* zx(rDtAu8GvEmn{P%h>3t2#h^FlMn1|Z8!ag2(SY063M=+LpAl#XCmKD=+QkJCqi%1e`ZjLq&P&)h93|^==y-xPi zYibft&$0bAD!@Vk&H#X8?{)^#Zlj!r;5Dy~E{-@~Sk0SHc_KNX4@7{`goo4dGaH|1 z@{{tVL`Y@pOISX1cCIMWk+23-RV#$Hbk0i?I0N8m-NjMHpPV!Ou|*4pR4wacfpLbW zSwaNR0D{jN<9{-kTc66OJNmVdI4DF_508wF2EKj!Htb@-v4)@ffcwt1(A$6;9+TY8 zqzOM}oY@=vEt(@9P1`WAyB-iICEaXEwvU<9*hmfdGmF$*B{3nPd1IyyO1VQCTS(TU z!2lf~6hQ$qzLxSd{i*8c7tBQF4x6f}$BArwLMm%bX^-vz2U=FfLp|MR(98Wa<1_22 z;>!@ZOwF^4#HKyZsn#Dia007a6NY-7g{V4@_twfc+Dq0tp456t(EM=`MUWN!v?P@3 zyVE3)YE=5=t!9zI0*qGF&l=(%5`t5p-z1S)FkrS|Z1OFBMdBPUo3KRpmK2SQ5ubV) z`L)r}(c0w^Y>WV?3972n)1?_@L;xa!w?plvL6RAU=gY6eMfPg+MaTKsiF)DHX0>w& z8_X8~h21qeug+O!<>u$J%Df5gTKK)XdKU~)UF)+Aui_E+6Rb&vk{aM;YG*Y|vh(sR ztgQI@f40WtI|U}54X!FS4gHt`jU|8deJN_+cO#SE-DSz|78!45>y+tUw+>3z3C`$V z(MjRn@z}tPudlBS^XT<8irIfhV*O%NzKoA|>8^Zsc5!w#4MRV9 zi^#>33w5Bg8_vWDzK$vSvDHeTxVCu?;?u`TDR2u5ZFVo5h_VPZau zfZ&k;+X_r+^2OXVTkWz@eR*PR(tCKmRq9a;5*Yrpzq}@%7SS58e?OO3-*SM`Xwq{&>XROh%mOP9 
zD)vcq*1G_xKP#wsU|n0UmBp@YmRj?@mBctxrYTCHVMp_~{}XNPBtuf#b?lO($YPWa z>`6s#=4(EH*TU}6X(~%`RH8JSaQ9-xSaST`-={D`G1{T*F|C^7?bIiFg^c9*U0**D z2&jKPGdHoK7~loFan>`lRCaXS&0t32Qq4HU?AvFz$N0+FGC@ugNW#CtsrRbGh`B-@ z9Y_kKv;bM@#_f_2s7xz9ILmPNV#MIPRL4zPaWH*yE21^o*Ha<6(KT@EeoA0vpy|+a z8=KTBQRcoXLIZ`VWPAY{oJ8DE^tB{UQx(NDY z`abg@^(GShxF=7D&~&Qp{LUjZ4JVMb_r9#Kf@_uxH{zZ&WGjUzqHwuV^C9YQJ=R1V-pC z5gL#HNFrZ~#hFp`O~M%!kget6;o+d3e*Al>1G5pqB4%Xdv6yIr03{~5i7YoZ992|R z(E;s1gazqXg#_eA<`41z2>p!U$3$Ez&-%@HVc%Q{qfE)6=#<} z1~jwn0UraeHr>#@upPOhOw?WmNjbBi0IyK;96Q_RjPm^X*|zfIgHPfMTENL|d~bg3 zlVopwJl-U&sY#9@ajHF2QG0)#e2+|*H?J8VlV$mDEQC28?(oqn+9D$w!5=E=>MtBv|~hIJeQhNJB2?U0?F1gAwafFoAJ?j z!40;)(|K~%m5zUMCbx1ADJKQTzV2G^JN#XAIjUe6s`=IoJLTc##Bwc<=Wr<8UQe2q#cW!|*k!-qjt74{q#J+I`u%t^N6>jZzK`nY&q$Iid8l?Ky~WEsi8YO! zs-}sO^JH$W?uRJki1s|7lG-*8GC_FJ&p zRS(!ZlRX7Q{MDzFW3|GBYkbVvH)b`pqiXf7B~7d8>g=PhA8ZzMbUqp*mahHKLmSDh~1)_MHHIu_UcLZ{?h$DA0R(CXFf(Zdw#yVA|=d9+jnZEJgXRaJop z16@yARTbDXH!*4mQBdL~3@k-;Gb#`RH^}rW2%;$jf5{6Iw2L}XN7EFhWc436*($Ke ztM>(3Dynk78O6jb@^IY$$Qycmo<*_tl>J@i%E>`I1TVu|1VJKuS zQt%K)9Sa91C?8!=?y2-w)8GUeRA>zYFm@A5wU?xhpr;6@Xw+6AVG7~N!2>puBHYYt zNat6SG;Y3(>gdZ+8KQeDHKHlaq4xRGt`RP*4@nTMD@efabXxyanak?(WBDE_Wid|fp_HD+ziE^~<`_OTLxZU()X-~M> z?Tb+>OcGo{Ze*dNekjajWPd7uo$+O|zhmWQnRuH)DNO;*XUBY^MV|5GmnQ?eo&;Qp zTB&*CH4~?ElU{KD-8uP(VcwaWH+1NQXY6T!d_=N(U$PYFsJlPwEImal!Fv0S$D z@wV++DlksL9ZQq5kq`9D2|=$xTxEU#PpwnY)y03^_O60T+s|CsF7cs~tt}@I;wVTt z6LK5E7pPZ-sKZ^ea&o|BB5R4UxEKKDW_mN&l7)MK-V7{Gb?)s^PzirXqy&Vncyz@<$@q4#dyFDGv`b^COG@E_CjZ{kWK9 zWYYI7-Z~Staxtty*08ZTVMwhG>O99!rr$ zr(gziJ5~a5G5VG0Rg8>V+xOMC1D5xVFL*KMZADX5hWedQAr9>#x)WFKjqP)%{juVb zRa$%KW$u9b$mdXgwW0hhFLr$=%VxUT`?{B8U2guu*l(DVqcaZFe4amY>Df4qxaI3QQXhhYu}2oPkOD4Z33R zq#m7)C~xgka?&R%w(hli!*G$scysV$p8G40PCG2cYiq-n#ycO8PFJ7rO^bYWahK-c z)RIjbnDsgQ%|hs~?rDx76npks+#`wT%nmV3$UD_xVtTOu$E%i>TtjLT9!|Sb^=DoA zS+2GDfxdoQ&?8d#kzEQ)>zh^ue&47JjL&=;#!~j`VmWuul-NlCbX>=OnhZdF(BXR< z=jY4*2+#m_XRd(kQB=f1CE-a1Y*=0%9T{Hh+TPBI)C+UzCmZHBbh+RQ_yzfH$GnO%99*N_1EKkgf#)f%T~dT);S+fpq{%fT#xa 
z4iw@uJ>F>2Y?pzoOLXG1>HU_W)Y!N4u=L#aQL%Naxj>#cOb4lwDPx9Zn>s~jl)eIWUf zp8faB1u<%`0zFqkgy205-C@=WT5MVJ?ebZnzJA@e@aEww)%M~+@I>UjA3d{ZkQ?`B?M6+;#pydW zG0D2joz+uvT=~WsBUqPV%)12lFXn++X&ThPMYZOs+3lM5*!#Q(tM|hB z%J}PU^k9wQUkjRV)Z0Kyw7Tf&kJxVqEg8L&Zz2;xu% zyWjwIhS5i?DS6(Ce;fVc4IaAH+CEFsV|LZ!9N-@2QhTB`-p_xY1er7bvgcc z2xL+5-WE>D{7QbVQ_GV;2wutG`y`_1!bVxipiP{VieH0yrnVtulL@@<^lX$<-yIjw zLg_qpsL~3CuS2wNd9glPq}D9~hZ&>+1eL9RpL&IV@G)jqTl4EFFOqebZTlGz`n_|f+=7T4G4|4^mQ&bBoKBuf9V_5dJr_-YrLj|`O zDgR>DRhm-=ZLCio%+|3x?&*r0@TBh2=ts7CZimO)SvoQB=kWDPMy~oiuPp2@v}|Pi zE2@fR{{8I=DP9kPc8qUD6tkbd%i?~^yq%P)s@|QMJxqD#HIN@qm#seK;mE2ubxPMN zuGlVto=SD$(oI?nm?Y%Pt9r5U1ewfXMX>=aJ}^X#aP zs#J-bdtd{uvkK})OOkY5(zjTr*-Yd;y67}k!+t|9Q+mBfhPWotjOTN+gBE|#CjhL7uG@I&vL9AM`+1$I;Tk15wb&>$F zYYekFp^NDrne|ox>vD#pE>kFQzk0<687T3Z8?xOrA$r_To&qFeP)X6tZ zakHhS*&ZP^H5u>7;u1Pp?AmyW>RUS8j!YWEgqYQ+HIE$@!^{sfP9~Zt9jix5xDD({ z0-Kla770fV!c$F@6%wcUxIbMUyQ5!&huC$I6=6wf315RL^kFB*zFjDAqM{Vk+-{bs zu>0dMKioy?;?K<+>Ck~4gUejA?BT1y-S`yO5Ac<28y=F5PccnWg6=zY^&3DGJ7D1Q zI-rkdV-jl@s#VX}fVR1->#u|Higan*_EqIM_;K1$mu`SYffUeyU%ounbB3JlzM;gp z1?kk>1a;Cb0v&=y&S_RT+9M8(BNlzDJ^LcxuJEJ+S$-3MLR(Ub0*!ljjozz?7vOpZC#t| zf=@V}?S?xt3u6DSalnt~YzXX&cmEf?35adbZl?{Pf6lQoi8lCWc_ghtT)HU)jXxgyh)SFMUaV%_x4-HfbQEO+ z$sh8LU{BEG4{guVQF~hnfUoeH0RQu zP4@KcG2cnv$=wr&+@_~cV-<)@^fryJ@jJB{j)TftW_HDhYq2&nQ^qx=aFtg!sRo6- zf%8n09>@ZqaBFL8c}xfb?*jW!s}b=2{d+u_fYC=BQfQ^0!DSA?(Dh}#7r)Z(i#syO z)FebaDJ8+l9aCjW;w)2p8!j7cYS8&L&N{X>Omd|JPXzJBaq+!j6Dd80FKeK%3u*>U zHV={M?g3}a1$UyZpXq@HE!#J10g9ZWU>!zcQSb#gBvlU)QxWY#f2r3mHg)F!VT*<}HZ^}d;8NzPhR|m}u z{*lwTqu;JLWX`@9XBhbH=ql0@`-t9D)5A`k1LMb9;z&bRMb(>}VbA$ociqWv zhgoW$*CR%6$YPr&<5G|ppTs6+{fjVds`%;K%|Yt+&aj~62r~1O zL|g&PAKnY5y84zg6~GE44SSz(+<=mx1!>_(OHhV0<>QICG3!YV z1*!k(JYp;n%{%H37~!e6yV*OyOw_+tr%My{IrYu&LJE5sqR37usC^`@c;DO@_uSj) zyod`~h*XwyLLS0{L0a?IbDU5@7S|MObuM-T7t5MNdBt;f1_?+V#Y<_9zCg=Z+W8iwlXD%uTFIPKH+9_=OeX##e7fK<@1yZ z;h9(*WaxztV6{>=(MoG|l>ZC~0?-Onf 
zK@QcKG{uZ0pPx`2>KbD@kQBuiO;j9Y?%KVtwM_`S0NDJOSGi$%Y+uV@hWn2MT1;ZAKJ0usP0{eiLAu%1b*#0b3LCDTFY`<4hT^P# zXM2p;pqI&HBS4$by$UJSMA&T;vP)Ks;()W z1BOMlWoN0lcl4)fl*G0k-{jC=?M&gk!`o)yyzsY^*Mde2&woumd9YO5WLS>tn&O0{yaZ{yIVvZ`Y8$6c$p`9#2Jfe$g<)r4N1&SU$nOhkqQND+Dtb4F#bogHbYq=*_cv6pZdoWBar+oyg~*~HeD8WCllg!oU=1b%5R0h zfs{*{wJ$h4snBiEN6d0=>w)3Wk24?z3>NO-rb>RTwLmGnHxEpI=`Z*nw7(@nyI=R7 zPjlMo;kW5P(E8(0ZgNt0`MCX(PDM>|`H!xJYI`H;sOmp{`M{?5+f6h%QAB$b9Af~w z*U9^Qs3`+U;;P~OqqLEynP4vMJM&#WJ9tHZby+IN(4=D`b@4s6b1n`jA3nKpIaw8> z^To6a%+!Ol<}<=Y9~=&8qWsqp#rhrL5k-T#CG!7q$LJ*q(#?c_K2oiqm69GE9{*&y zn0GxVzM%Vo3GE=hmn1o?ipHp?fcv$i{@I^R(U+_b5kSLTi33!S)5nlOnnX=x;t&c|^&ikT&jEkqr$#zsVMq6rP- zuiECNfCK3_+x?2Ajt1&HTOS+>gcC>|(8qvm*p!O;m@J2}W`4=RXK+1;!{D335#~kC zkG78o&a2VA>ki>TRbvGzX4}nEVHF1r70@Y>ti8f2{XYZKV2xqJXtol7CLI$`F>2-2 zm=4VYJO4zLSNnu`zdx$L@@x(KW*vZ*_R$*~r7kxuuHG||ov6$4{@vZ36&%Xia_ zn|p8U^m{J7u1q-^i~Ui2OL4_tqX=Y0JnutSp^HJOf9*=$p}f{zk1r+; zs+B4V$`LQcr~zvrfSM#fxGpOV{QzUd`a`Zn&<+BOHkPWYvnlisz&ikecx(UHdQJ|Y zWd;`w71(_lxCeAcC*scmuRy$K5Jik=1#*!4hqZWeHTt5EV5o`(*e#Ezl&Bo z>1$V__=1xNZ)&2Yw@}Jp!5Iw}8XOZ$w=|bnSUNa?vw82f6)PnS&N@<|K?W2%&9NP{ zQsPJk>XRd;)0_@#-!M-2p$kOBel^e_T&ghItu zlHg`=`tY&pb+!EKKd-)QMIK!p+QKdmca7WKZpzFIs1kf9-)vL0>q@z&4bUDm($N=-2db*FP|q@v{M@-7 z!k|!Z3w8(ibjKZb;D<}0BBELhG*&W7%07)-G&D5O=yZ=Ku^kEk6yF1T7zK1w{}x&R zqqHG(o5~7NPG@)~Zv~_2z3@H{amm5~4tT7D0}tp_V(Wu$l6EM9J8`@!k1p#6o>ap< zgp{Kr?!+y6IaLGJ(t($=<%W#o>(=}h1 zfddMq3*c^Zd+zfGC(c9AzKoC$_Z!19b?92Km$@P^eo977z2q1cYuf7rZIE1WPT-e~ z>-Pin0O_LBoW5n3Iyi`n)igU4ZUX();J1Cy0tgfZm?uO7nYO5ZA-}^|vkrm6_4hth zvYs6y@-HD?!{Jcqe$c`GJZUwpF06Yi6uN}g)~+CA0@+A#OuNAo)uWbip2V3Zvi52% zrp@mj8B|TB=jEkG^`(v&2_zly-(if7E$pUTSA_BgpMu`?6BKc}9uCYK{Y=_T-d>&J zDkQ@{el&i-Q`)shCzieHQa4kwZdR@-*eE9-^)Y%Ji1U}xjlG0q5e;KpJgdfV{tyI_ za2$-ap}*%w#wph0W<^Ko;;sSYxJYVqvo|}e+?C7tUMMu(&Dgx2u~{~555vpc5l9Lj za&$mPp6w0JGM}VG$Z?@dXbVs*iJ<*fFG+OBR86SZw6w8V>Wzmo=~kZ_VV#78MCwz$ zB|***BQ?{6p11+Y+26uyO?i1b+eSm?9vFw;yL;5s8-azDCaSK(b)+#WQT-kG(av;( zU&>q44~jbzO#v$?dq>fWDcMfBL#ZN>|B}D#lIi5 
zn3Tafi?iM=yj378+Ehl4)!%LETy|Wxr&%N{Pt<+xTgIv)M) z|Mdb0R`iC9Ey6K*t3i>S=Q13NQ9VWgWH>%L$mXH`+G%bT80Ru08mc5$}WLmR| zodj-~I%0R!4i1N0_M^s(Brh-EKz9e6xuWltXql7!09JxFg2P?Dvw$vq!AP5=A=S8;f#t6U#jX0>WwIkeT^sX59mJ48+|2 zFCiP9h(%Sk#;*}b#;RBJ3X>o^#)`q^gmCD-X;A1YdQXL|_1k7$0k_c3Y7|Vds1)5Qesy@pY*ZXdS&$6ZtF~4=n6; zTT4Lk>({R#ARn=mXJX|D^QzODZ7piOQ71tMAui`Lj(_tNlk2jHBV1Zc1sk57QleF# zz0cDh2X{4=_}V571c55&zeK<0x?T^!f?SRS*W3L36j$03@hh4xhTw=^jRK_prt=2L z<{6)}yl)wHP|=10`P*OZ?JD92tmhY>TuLbN=72pKe9-m9rKlY32{D=q%l`r4_zINf z7)Pn5-X7EdFA4n?lvfgIgy5}WZt+J7IjpgYJzC&kgQLTqaHtg;E``<86a72CrY=1^ zW|3H>q~WBMUJm<>7YNIyeE5LG)1T?q z;vE!ZU5iPb2h4ITlA}WkE1<|&O_(rOxB(~*HobWT7#@_5F-C>{gKDadH9sIML6kFL zngJC9P5>UZ-(88ukOhtPPhoNe{7IO9dY z%W$>PM9ScyW!`5T31vfS1#tF&SBL+zWH-H%2J>A1=pspDSsKfHIvklH@f@g49HQdoy?dntlFLC41@ zRNvY8leoD)5qAD?M$^+BT3Lq!Hdi)xThUfx8?;MG7vZ(qrT zV;JSNeEb3e4lftBg#fDvQha`p4Xv^d9Ncx`gb@Ljw#B(~VaC+N#3$a)&(htFSqkiE z)@mP|0G_HxPHJLly_~4mumTb{?b;tt%rU_~ua?}zibQD@EdgOErQGH0<5B-uzGn&< zCMC^k!`0b&WmP_HWToU=EsSr(Hj(ZKDchwk#bf3G0`)j?x z@k;?S1wuCgkpA1r7eb(8y3kf{1Gsy5)I5Ax>(IVayu3hPIN-6dd&EN` z3~ijMM`ZwuRQY9;;Ixn3C(`{5E9cU-B2paDtsX#?`-G7@yqqpUH{z}{O*O!tv^|Ue zGYx0=Ma@^o0$W|uG@7lCbwGY3Ki`JSp~W{#Bn1S6woS#NBLE#d1oJq7cTfMW{M~^@ z2dFQghfXBmZHm_Vwp-bLq08yyG(?uiBpr4HP#X^4GH4HJpSPRyncQz;&!?ROB@Efk z5OlDi_!d+O>ePy5U<*kfy#>k=S{#70#V)QF4KAF@KWqT`vK80ERR)s9?^tgTZxINP zIy%Aw>A#ZCoCsJ|&VlZW6s$Q35gz*z;g~KQsaIu)^6?LzUSNwpAV(96=dmcCqz(C* z{DA+dyvDh`tO_KrArl^T@&1cRv+_kJX-K8h&aXT0OTLBu`8MKoGzCWm%A2?X?vUVI zXDEh)0B|G)@Y|xgU|!l@ID^9JXH{!mI9tY|%a){^-Ye3l)AqeRM$YB2)~&>#omf9| zGdUYE!1$1~PNn$f)~a9qoj&<~Eq$^`A>HA*tIpTqw}KC4d!Kxy2=|+`8r#d-ClW0@ zmE$4dk({_?_a|oQ?@k8Os*;RZNpwzyaf+o9x3Sz5^}csR*V7obXK2zNAuRFagerlJ zrAj)Wo}>zYws5F-`%8; zczv+|)n9`d>2iV+wIR?ZLb`Z{BxPb;F#q-TsqujH{ci2TSMq2(_ep5$WNibB%*!Td z9iS1bBSdz&Z4boX7~Q1t<**#05wE8&-T{IU+P<)X@<-D6zgU4#`g=vl7p!g0t$@0y z;nC;s%0#%o`jx$3U2Js+cy^`!uWb>az=_#;;WwlH9whq+-4)x&#wX~b;{?iVgD`9X zEyQpC-W;%^q<^h|e1s0ObfgZU(*&xF5;FbUgujJA{M179gh$6CDTJLzxyhv 
z>E=mbC~@v*@nTVlbLA_hweOyf$>y%AM=M(F9D{Ee^)k~YbbU+xj?puz=FHpX(GV*0n-8ddX>sLUNWB=^L40< z>?&HR9h^e#ED7U2`bd6zjG9jp`X_I!=rHzkret=*Y)+e_xZ}B>69_n6VT3o z`xO5_vc3YU%602{6Vj3*4Wb|d(j5XK(h?FP-67r5s33}zgrL$0(o)iml+xWH-5}ES zudV0Yd%y9Y;W^_7d%yeLPpmc9oO3-d)0U^M@+YU$I!aO7r*)vaY(F_+u6EjAJA8dG z?SqTt$QKms&7wI3uz`0#Luj8dy}B;>m(02XZt7(pCbh?UU7%ejUL;kZa&7*VbUGCO zNUnceNT9y<6JTyPs_orkcs!%Jg7oUG@5yRhtycXV{byOQ;_#Bdu7Nm7x` ziQFap0lvMkB)^AoRGm(S(>y^>9X7}9t2^}UzCS<>>!Pi9g{1`kxyA|;9gyyxTM{=jQAqKnD^Y5~ZQpD%MIUMDIGIzY>;bozpu0`W zi>o9^(_XoE78UHu!#Bgb({O^$^{8%X`_*&0E)V?rUXqJT;#J3Lry3VQr(=6L;>{%a zt=s9x)_S8}9n+0oXLLVP#$=rJPiny(sC#lNjP`=7&Tap9(xQeky(`t=uFtRuA~^?~ z#Li!L+IHwH28w(5uNHH}>QJ4WuM*>n?I!j3p1X=xT&$#i-I-Vs*CYC4h32Xy8MCHR zoc@;k2yr($V8IC=spL)^-g*g<$-}I@f=?Lrs43d-yk3hCQf zQ*2zXK)km=EpS8ADoHkft|kBOnA$%cqkek&6feo|h@-OUK2p6OU6z#=n}sx#Wh>Pg zJFq+kQdBNUN2NFdR6ourm8IOz5tX5QdMyy0ArU`}#+ z+DN9;{moFzxIxI5S4Txx$I8j4vEA|@8sx6kUHUSN_WTy_J@%-sZ&(Xs- zJ%Ul~U!^+0ZUlb~yfba~t$g)K*#!Yh=|t-n5-Bmz5rI$k}T>;)7-+s7n7 z?$kFZZB~fWvS$RYscp1aX?dCNm4Es{qsm~ad((=>psN{HNWp4}E`Zh=y zn0h?;uRpx*rp@)(XlLYC2Yws(_wkhuY2to4w?z++3&JHW@i5nSqg_ne7 zTKjyamumB=F4p$HAP|cbH>7!MjGR@l=}kX>gto320SkV&t2}G8hbtHlCu=dt$t_^ud;tReEA)*OFrKq?iVS zK7e5(wUTa{{YqhY7MeuS^GLsT^NM(U@4W+q*0NgHTjMDace0a{5eBF2Nn7nK64t(S zx(aWw4O-`L`@~+aE3zZJ$lpbiY1riJpIH0>myWpZIP;-Us=vpqX+(#xVTiT z>GG^OeMOylvJ1KaPga&Cty|Oaq@-kj{d7b4Ui0&9J-v4$fDoaw|9keM-_G7Q4u$|z zbp@G61Jl(66Iy%avrfW<+b5jKtIwwoj`D4`e_fKK&X2f%HhEHidF!5gN7XftpJ(Gl zwDgY6(9(HaTYqob&;HTv0K!NWRXHL2*GHrHFV>A%VsOeVE-t?icrc33nDdiwb@)^) zQ+S+Hz`8NBlpyqT_g3X7`hCWQa$#q!%Vb{e#hdp+`$xvn3Wi+2EO_ot-(I8q6o$!1J1T^!VT&q6>{~ zoRbUva4P8+_}2>qim##XiXuu}T$pOa`=jyMm#%NJu|4M?OwBCEyZpJkxq8>I?D2MQ zF7^RGu?Gz3rx@;AO1!o|CWG6~6j)6Kb?M&-ZFs7W<6hH}dJ*|bF8d*@u7HSy#r1ZR zXmmwc{B6&P^$P`7TD~Wb=yDCE#@{rNa(Ce)c10s}G3(bH@8#J9O`2n$RC%<#YA?L| z8xJqV)8vc-gXaNLB>afz0mn=G58S2reIKSVzZTtD&Br3~n_F?VTv-lV4d7gwcL)p- zMwtHisf6;KM2vQi;E8~<5RIyz$aSKbS5gHg-A$uWt*JkY$8QV5PvD*&hupUOoK*MJ ziE&^3r%8V34dS~PH@@FCQ>H5!D=UJv%Ygr-H58XE6oKd!!^SstQ0;}k6u|p+>0fO6 
zu=>vJ&ozk<^o;hu@t+QYUm4YHv42KO8xz|;MAfAkoD;AUN4};;c{91791o?A;KPsh zX%E@KSBp3uJko9#bWt`gG;|#W<&hey}rmHlC!7$Pmjvzbi+1ELp{8;m60mamtV%&s_MQ7KB zJjmjrba4=(A>4>EoH=HwqDz!`^|F5n1wrX5z#Ch|RsFWaHt}*;wE}BS<0js#Zw3{8bOXN*L{e^ZENeYwFdBDRi>jYd!XiA`+PlFO%ftyW#CfQU}kTes>P>NhWJ z6&a=`WnfHU1(mqze_wWqSqasB_L65KJ|P6|Fh$k9$*$P2a|=!RnyDWZ*qd zePcOks9waF8Qt(-LR?? z2SK>dx_O1GyCOd83%8=|ZH~bUtn`X^=<5(F;E%S`HRWo$YVtSsHji$1kSQ5CK7M$E zDrM`arS|lb=WkdyNq4*vi4t*d&6?hCptY?YDGDsMc2=o6N-|6(4HU9<=*;frGMss~ z$P_owN#t7_u?}yT=NG#e{oMD|R5|P{q7gt%SJnHLv~x}_enPQ;LKvC-G-o)Ya@%6w zn$-0{gr?@*Je$_Qcha!29B&?FsMT+wZ7(ZHh~}poV)`=?N{a7d!#CuD(35R(Dd#1r z8x`MI9GCsyX1~1zc$tNXor9hW@k?^k+hJZ)K76YA2-Qu9rbl;kvAQ#k317(!>&&Te z{PsE-)i9fA{?ffDdYHHMxweBZwjo!fS{|z9VBOpd2qEVjyae0W@I#-B50@Nkj)SXu zx6pS^0h0jfNg7S=%UE4rn-#)w>LwvVT}`A#neiC0{V02Z&Z^km5KouaSIlH=^mA_q z9;AqDk{vpyAHfVg-HH-n_EV+2ESwnw%`T+`i>IxXn_Y>bEfrTzAy|Y~zS))Whd^eO zmW}#|I&G}BV7HSDYezZFv#b1`xK8K7X2EV-e9+TjHiK(Y7Rm+#LU$lUI0WLM4Kq2G z>eKEWN$Fd3cwqU6M?4L)toFN6$YS*^SV$*W{(Ip?dn+=}2{8|?e(iA9Nzb%?{$r&c zHqXF!x9v-faqg&MyP_k%&dKkf-NW_^`t^(M!5h-ojOCs_ku}aniCh?;u`ivjU#y88 z3YsJ!GI$ype;p)6D83p;Szh<#S*Po8`{g-dnG<_s=D>EKIi|2|C#3AR)?TgzM9Y$* zCkhu5I>MDRJ~J;Jx-6@AMJx28Q+xMhM4D%p zf_oUpc~iQi<`xS(?g{+|_y|XJx|p=gPk0DU4TkuvC6^d(NhYI%m_YYfZ6)gDxR^=I6cEwGRR6#sYymi|Lx&i3nYyuo-XZg<^4#D>blQktB8 zx9sDcaSzr_4mL^H7+zFqn{WGidq_8;>Q8!VRGY;2rlrl6GKuH-^?{oL#qZEXQ!AcBQdmuM(ybAr z*w>v~*|p5|p_~e`Q>2<2?TcR=3R76GzOU-Le$<=lL#)d)fp5KjH75FQ+9ph7_b3X) zZCLD=Pf&&Q4{#{0lV1K<@%LS~b@NlaPv)PdI)RcYGHw{(H&^tF+J#O_fKO%z{?Fg= zm#SrTaOF+6lN)3D@X4>|T)!BqQ`yXW-r3BQ7K`#8dwQ%Z&-JNlhXB$4(tV_*+;?$q z+qr+ovc!8V*+c}9vQ(9x#=7ook^kkXL66%<#`R*o@uzIglTMR9RvYC3Eil9<3U3O! 
zby9tE<4od8g_^?dMsqX(SZQItGmfyxzRyYHpVzfk*VET_?xkk+vT6TIrnPa-L-k>n zA~%_chy-H~4+{4!L8l_L0zGxKdzPMeU)Hy{+vZkf>n=XLWv8P45M!%pM&ia@j34}DhG*K*xi}+4v4NR|87%`5WfZyV<<+^>6k2E) zll;N9E#;b5>z$?R-;_lwy=dKO{B2lRnD4>**A~jK-R9R^8&}Fpm{bj&>EQ_Lxui^< z>JypCK?W&usB4DnJzlsl|8Zg2=*t_A(#mCc@Os7CTl50lABIf>A{{I)QjKKuyi)dc z_u+`G(Oa!XG5&7Fpc*&$^of><1NGWu?%qR8MC}YV-MJQ%x?GS^tR}ZFZ!w21UZ#Crad{J?M;qJBZF3eX5@qW#_Hogj=Yj` zVbv7WEoi&`PSAnad1f1Y>hqxWynV`b=a=#*adJKWCW1F!EIenJDQ&U(%aPBaqOxWYskRZBuNb{O6%KQ|cfSRRQnznL@ zVqE3?T+J-}>O|#vrkbi+j)ajTh2|aBr$EwK4(hFYeW+z>f>asUY-Uu4*0K5x_FUKW ziRlhM3j>P3uMH>5l+_D-+oq53j*z6s8qt_ zONv1=VP4E&lO*dz&OGR8v&8Ni4Xbt?oBHSO^ri!R!Fw=NV-VA0`PaWh2(EaHS zbLa|Q^GEyd^QapE*;fE|Tr{7|A*(?-f2(zUORI12g>HOcUFVfvR4Rc;G! zW=Zy%#);vsc#7Q~P)_5IGs-iAn>3QI)mR>nUp>`#S;&|PoxJqtOZ6Ens!Cb_H@X9sk1VzMq?WNj$r#b}%0-@{<| z(lROF0j;|bnSg8Fm6+(yqCYz|%$Q@exX+GSqgb&m60&`v7H=gKLWAtG3niW>#jTQ^k`FW70K#*94P^*F<7Q{ld)!Z5+6U z^k&t=s=m%WY@9AjjdWTT$C@HC-%pCI6W>G-KuuoybCU-1sX_2abG@4lF=ziF$8Gi( zPYiEm3&i8PqeW&p)bmzK9W}^f=(eYIql z>Lq<%?sIvb5L~tfoO^sSq^e2Q)vE6PUO2(o#g}2?VA%oi*a_GpXliyX%cvR*a|N!k>KncuUvMV`vSWw9HL7^73-qg2OU?2^8} zg)-Gy5F)&o^$=`Am)`1>3E2l$oF61amsL%^<|(9%Ra^>eB`aukOD?~?>E%B&Kyhv7 zHwk-3>W!UMc5$5#;ZQe?ctCKqAbF>&gh_R;>HUCw8W+tQ7+{e)H6MCmRsJd4?lx04 zjHrl?!5IvHK7-#UacAE?MAf~wZl>aXU<=iW#cdHsv)0s5D~p5Jt0sx-tx<4};@@Wk zn3x|OH~t(^%fK3Cy@ee|7-{W$zIkFZM>mo}<1e+&uP`7fCvkXcl`n!GHFu99lr<98 zEydEZP6K*!kI-*L6RsgI$?%Fr%hmJ-`~cqh3KmAr7;>qT)JHUSf+UJlG`;&p?q;Rj z1bXQd7~vd-pHUrWS?Gy{7qPcy2M?Nlh&W5zg_-Lrj`ihboKUP%QjdrvS}t ztj~?-uJ+?g0K5PmED7PWDY#d&pC4n5KSNR^0TFqx|B^AR3mKncG-h^QuzrNz)Ah`G zfGbpUvu}6<->J<9<~kgDD%=qJz}H|`Jd^gA1b~;(U=6i<9mKs-N{Pyn!5=%Y)gfA6 z1x)><@$G`(Jq5PX!5>B+`Wn_8o7NoXS`M#O`9EsP+JC@^8ThVLops#N#GBLRByG&_ zb3m37VRpv8pufzG4P8d|a9MPyfEoCj8RN*Win^KSJ(D@Xpg3`AdrY8-(4OaE!ih4(VMJRaUCNP;SZEcJ7@ zENhMmgb#otHhliDhf3ok9{cc)j1Yw0;CP4_&z_|6lrx44?=@(@h@ zFPuXxJ-L!s8~s;RFB?OwOngS)WyN~E5PeW);bZn09miF-OFwa!lD))7-iiSF%DC(~ zVW4fQkjMoDSHbGzhO<|q7>hjaLNt&G`U;i+H1~`V-R5cKee!;H%HbFedYpid4Hl*Y 
z$ZzAFwcLw0+xQ-1#P(fzp%iVC`Kxb{nQQ#zsgA2O-(Zgbv`JaDc>(z%o26wIhXNyC z*_==Az2nA^Eiz<%t*#EKx>yM;FW}vI1Kc6B|6w1-W{l=CqCkM%o?>$6w0(*>g=v@T zIT4}xy^&bv{a9T}XeC4^#qsz!{%W+U&hP95><~<3ZyQaMSj`XF0tPjbd4onZZmLfw zJVwbfwR@k@3?dE}TxpSlz8Cky&%3aZCgb+BP=r>H{D>_2Ji!!tBbd3^;=ntb? zrQf!>@Hjf@?dxrR9GE%#+hbsWHps*LLqa(F_gHi?rq}(wtSrn;6S>Uh8-9zToD~>v z{g#BqJ1S7>-%%`J2lV)DbA@2gRB6SQJj8&^0>#t|wJPDpGP2|MRq=0tVsnIuV>39` zm8cJhC09%f+xnV&_HnPnqNrDml)lN^B~5MBODkR~FoZp-^pJ}# z{LZ_LTW$Jo@C1bVvDgbH+i~lh*n8Ua_Y`DKHQKQ0tNJrb$@V>1U-Ix4Y#-A3TuM*< zQXKBgjU8vhanpluaO@($6j?NWPV)V*!3kVmL_DcGPEYmVbEX5j38G!!_I)QI8cth= zMkRaocB4ph?Dh;4SF*$Z5S_4>oE}c#*AvWln;d4mUcA`63p3M1e3z7o(qxgHuqXi; zHzlz~KR_1Nm?n!xE5L4D(|=Zy%3}W0fqe!$z-BN3t$BfWFNjc*AM$}bYS;4$iW`gO z540{ncHsi%0_Ohdk@`s@c`hECXQQFKqWgKdyPC#K+Z2+C#8N;F8%MeJ@dmLBM@GyW z%Wp}9lBs+G0&_h)Uq|sxy0vysfVd20QN}~$B$DQOY8obTrDlGj9=7}7woqcWNIl^y z?65JkQLq%(rE;xBelq}3f8SNSR_~TEof@V;+`8P|yLVA)52M7Dtx2yn+IDW;>pw7s zG8lpNP70sxL3L^2^P~B_yKZdclbCc{!FS754&w2bTQNohIqq@!&22S?5y&OINME>x zm4zxLR}Vw%v_H=U8_YBuShog1J;*YwCO`bc&qflg-Z4jPNVzJ^aZK~ntG4Apk~rm2VHQv*G***ZGb?_T`%Mc~4iwc8*&WOZYMR%XVn!$wy-nRA zb0A4T>f``o$u|6l>JP*21>u_^Y550=ipd9bQxHU}re2kjNjL;6a0lLk{E{g~srZVo zo4hIUFX@SZOP6{IK{}ZQz7vUCy)Ux3JMLGD4NQa(P5Wizb-!DjL;k9im##Zz(b7tKSE@7TYS@+D>7Q>BMVk|PMaum8< z_eX560!yS7SN5|N?Vjoh_Mwjo>v$8f^PAuw2n6Ne{rxTs5==y#g<^SK$txmTj-kRZ zsMQhvXw7CwGO(@$i`F2*B|QnshS4w;%XR1Cn|PU(z>EW(bewsRcj;76hXa`>A(Z1^ ziER)yd47;W1*O5OZ^SIbkuUJ0qf?TT{e9wh7!04Bl1SoRI3)mWJq$gK4FbV+_vGp+ zS`=Rksl=Zu2K3AufUYyBwSRJsTX~Qdem;ew7*=yRw33R_75WxQZKl$Is4cmR30cg# z%r40G$ASxz*^c91z9!yFuJQc$dEa%MH$RfA#IT`CZTY!ZKHB0pFRYM96N{{+Rc-0) zLM|85_$z6M!^DrRE;Pu?$aNegQF~D>QkYDPbn-}(U#`q-sRs7JG6{Xl$0_Je5~KuO z!G8aW$0Rpfn?&UQ$pPo$qwn>zFNox$9-u0bSq0*;(Ub(=ruQOpUK7^i&3<#pGetLN zA)Y3RkaWxGsW{5IaveLNv_s=+@oYy$rqbP}mEE{%G4!o_0#LJ-I_6YR+mw~KxyjUg zhBzUwAr7QdNbO*21xCYYm|ZDKXSLs60}iBK;D8)zKQCsY0IN}c zKYALZzI5(X-PA>GZkI;PR-h@~D+Bm+P|E*{*!XKyIprZR&$?BvRChkiW-bk#ig}lE zZCJ6w|Llu?`;gtlJA&p*XK|hP(K>HsUDdZ5wC((r;lutj%re%i5Xod!htad*k~TQo 
zX!*Igo)OsAHBZ-2BoNBcIs$!u%kz^|&d^JK3AHR;k}=1bCj~8I7Kws~+8MLd!r#w+ zs-@=kQ3=m1)w&VM?bBNOPgy~bR_!2f5G*_K4l|N z{H<41=LBLftmm(5r|dYg)siyLoQSTRO_I}n^Ur2ow}+^Sr1Ua0Q9zQ(+F*!eVt#c} zEpf;&gnC}G{$`$5`%mVPLt#W=T9T0S=(to7$;_p%#@Y&aQ}Jc{r=>Wm&o>38KI#TN zbF^o6yL|4#Ucx6}VoG%Z^rfm^4p!Cp+pez&Vp~={cWPO?@~20ijpXVboe^;7pL;Xe ze|{a-{{e6^0v#$*h=K7Jj8P*$It&cKR+cr_7&HH=xc{b>1lZaEcqC>=XR8=kfBimEFbEl`=F3NnhSH z)_y<-^Z^b)ILS0+&A7w?7v=aS5{g;NfrhtxNHvQ=6A?%KMj!D5P`My!ZV=s=qv2Ij zlkaZQec?5sC>mtZpAD2M9}@M~TRgdtsU$Kn$bz+aYht-A#IjsOsbd=V%@0ev3tCc; zROr1hyk|+qNpV3$9mra3)v;H_R*Ad=O&1rQy9qWyg-kBl$ExLpgqk`v)V6z0-X*HR zeGmYkfM91GM{*iKH$jz1UM@!A>;?2($tt_sw<){guyD!P-4Lo~d62007BK4dD#gS7RPIngOV z|FJO}HbFAYg|8lG1K2+q?MdG0+ROP^=Ray=PS@IrQr>fRlM#gk29Ew+Y^>u!7}{S7 zb|3u=ldd>tY0UN=aO9LqU(p0&KgZo?si_`eP0^xet%2Cfh+YrG088u2G76F439jCljs3e`HcHq^eVk z$jQ}@XQ~a>^*;5*bolVB7gCz^_VZ~jXUQu26DAH5gA)dTs(kX8K_`Z#m;{B|Kk|QEvG@`nBk*LFa2GZ zfTbA5p9>)_-XZ}IgB38l1=ZCZVgLsZ)2;Z~u&)C+ZL{NoK*wsY-+`?AB1_H_-@r4F z4F4Ty)EN1|*zTIUa`i$CYP;Fk?WE#<2WJJ;h*bmt+hEA0kNhrNz1MVtifC}?8Dzd` zlZQmq1S2bKAv1|dIAC`eDc8c4i$T24-#@>hNtX*U8~mW>t4`U@Y|tXy{XQpBj-Idl z3xQq6LjYA`!)JoOVizf<%cld@#7F!Q$YJKGJu~>nji5;FW6wU>*^neUi4#U3i&h-? zXS+G+R7eBmdcb@L5>v{$G0u48T(b5xXnAoxAi^}^jt5@1m`>EcV7?4(8|`Qpgv7r$ z6#<}7n+@0^2qrdHk(%ny z)P({*gDc=&x^!vXi+F%~?jT3ED09=k%VxY5=1r+B{O9Sk!CfMdE+ySwLH)Y!_iG34NmkqE(fROU z{^=K_&}JI`>La@D|Hj5onbwlBVtkqwJiCCEY-1dZsLX#*6$=EP5f<%`4ZCgR;&GKuNaw^v}}! 
zU-1cUfEl(){mH{`>=@F2=1Ud_Jm2T8k=Y0$NhcY(?RB zLkRmVk!g#0v^NV~)&=yYh7e zc}(H6fNXhJ?N$*HG)<86a_TOGYxnZ*jp7HwB(!5A{Xr2nq{;u;l_CVn*8+-|1T`!N zizQhlqi9V2zrX>mAKHV=*)ofTg)NH1b3GSs;wjI?3x_Y^xs91j4Zs-tAj>uukK%@ z$7rlye1ST+>~TSDZ5Q(LGN6xw5NRPo0o*zm9^en)_#Ph4EBu$NpkN&J6!LnMT2LMm zIt=CrseW82bk+7)BxR-l6zs4>qaXPlN@@`!4MBpPo#bGnKtWt#Dvt{62a<43QsD-0 zl-k0T+f-$A!$8^FPxyk_r|@-KO4{+Cge8om)4-78fBqsoW_&bZ1~*B9x4DQ=JTlAMA56qHL)GZ`-TMvEEWV$~)OnCcE&43Nr$+YnGl+@EBI%da?Gt3eCZ)(N(zGetJ?aQ@7(30VIw%u=0-*@wLI8Kh&D(= zo}4&j*=`0vNT@H+@0tsT#`N5fKo za)U%-;a%4xr`|6}NDPIsP`-ZBx3L!Wo4|~@eI%U$-p1t$dY-f*WEk3(~rS zIO8XlbZUJT$$jO@m4a|)On)g=tCx<|Md;Qj;!xttSJ2liD&*t)Ez`WJK0V`-Ssy;F zEij$#soO!9gc%xHk$2{i*0r=URe0O?n@>`V7O(2RmQ!jI5wNG1ye7STQzg2c|(ki#5P{J{?oWx(IXKc6*vJv&p-GAQLMXGCTkVac_5&n zH_9rr# zRh_6nF}w?751FW>aijT#E(wl>mryZPB|h`DA%wgFyQW+?Tjri@&@*HfH07vT(*5@9 z7x&3wAw1o7Pm5#!xNgFUG#TiqPo&%sckHc1OqYPxw930u@_r*-1S3+cBe=g^-BbRZ zdPl2Cc)G1~z^C=it#gS#Y~2pMs$xH0`%BTeW*w5LFWL46E*U&&bGrq2COSIOpTnh2E=C>R7(w{s z$B%Napv^FVV=@qN{^GMgwo1}gC9se{{?)BMRYT@>BpQGxQ@;L#m;&aJp!k2Ya2sSK zcw{B$^Kd_)a*U%CXfG0O`_$?c!rvtU7gX;J>HIN(HcL6_r8~j?=;<9h_yg*J-Xn%@yy;D=FJ^Lspy-@m*a{k+1LbOD&$=`B z$j5<=ZS$3hS$<&xp~9yGYMtw6cgX0)kEzvJND=>_wbZ|cVmBY+Z6E@#7c%6g@BsG@ z)GwqcvKMzn&fVEjV0f$MVJ>hj7*aUj*eda#z<)ItxQmoF;1h zu4^Jf|4vp!s8k9m(fvWEdeLYSvWr*SCEGh(2G zuxjEB_GsLep0w+5Zo4FMRJ}Dn@({vA!RpELzakzXOAJenTs>Xq2Z!zze;=_ChfWfO zYkAQr$C;?ruina~-Ngi=6=njeztmFFqiWboP%0;R&>|3uYM!?{bWCc+dEWtp_;fKd ze%BT6OY3EL&v4)MLsWVY(!ip`MWxZP_C9hRJc}m2zAmUYNkockqzdoTt~H*vLVQ9+ zK;j-5t77=wl<(BG=poV;=^qloF`#>6;fi|@vUtp zp?khJuP#o2({8&%)uUtN{Ra3oKwD8&*L}C`Y)qUd^R)(e&CINrd%mg6iym}?`$c}c ze`%!!yhK*3bi81a2dcw0S^= z03(WcX)QdwWUX2MB35S;0lFo9UPMZInwP>jxXm+4k7vl%9Vp?jnLnaQX(v%+W`D_n&#*E;YblW9{nMQYsJxg?1<%^S#9iB$grJ*+!EO|~ulLSyJc>A=O6i$_w<1ugLtoYD9rBEE$yu!Ac8WU=#G74EHsj4X2DC3Wj@pRzMz^d zZ~%W(EcwS%m`5}DG~tGL;My_f^Y(S{;z9;c-64&MPFq?v>?8Ft)+0eUJRS|RdJ(qk z122ATkI6Az^TM7iSVctz@uv_Bh=_&^JY%?GU=)`2?7f`gTpbvU;VuuiyU>uR4J@8_ zOO(NZMpoPqkF@g`9%f)eh(`E>mAo-_qe|9!U+Vgh`mO0n$Sl4;U6C 
z%R)~3K#!reM1n7^ona+1wLug$w1fLbW6Aei^RBP&y-iyN|KAnT*c31@fg=a5n)(y> z&_{JEN>g*T`TN7Cbd{qc*TXfUIphS5?aLmIh~PpbG!It=P-+cT2~CWt#~nNJ&(Oj+ zSg$g|r>>VH0RU0oJNpW7QtY2i`iIG(lqxA=$uikkU%3yw{rVgf>rd&>=Ss$N*PTJ` z8RvTK9S)e@caem5NbC{-r3wm@5gI({`Mn6)`mHyQpR48Bgjb#qes=?97x?43)jhfW zEnwA`#Si1os-<}!H4@jW8I?We6r!!3BlUd1$>;(@0rf>|XdylI7cVmzMNCv;7i6b~ z*oux;0AUeL^c|nWMFa0UJGgSvY`1TliUj0pAAR8fYxG0jM4@nya=>AKh}$a$4SQSM z+)HJaCg|>+TXWRWE(zQ+>vLh4(d&M)A!3QoT3v^|NpM?r8IdMx$q6{pn!4v4_q5_|(KD zb|YVjsrVhNk;feyfT)pW*jIn?)e5a8PNMwoSk~H}_+TxXUp>yymRtpMN%mr}jCK2A({ySE zeu;@rbEqazvtE9m;nEX+xmj_0>a}jyOoj5Iw=>P~v!9(lO6_h%aq^;t!hCO$ z^kFtyL#*?p%Nw=;Nb$?|cw5KapyHbAR@gsh5a*`}gsmdx>Ep+&i4Z zG{ciYF2?z<^KKGbj4AAiOM=bh_v1f=DGnn_R6_mM3H5yd9NlgfI{N~t6{ujNhat$P zuyY;?oX&##dH%I`PSLNNOYBwZPbUZ%h5d{}-i1`^9lxVGm2QmQ&QPVW7W2&xl>n%+ z8^WcfdaF5LCGmFrQPFK-0xp= z+UBMbZ$43Yoq^7^+nsiPzBREVoDj#%fi#o3dB@?%WGPZ8#J1g^F6VUDmpWnicz)19 zty8@g

E@)5Mm{n6S-S~vuOuaW|v5Iisy$=nqHm-YL+iE6x3IYYIBFL-mWJY}@) z-m7oCN}yh4N{=ltJo)k2MEgZ*J~n7!P_d@2wxQL7`7AO?ajp_LvbSHy8Y^asWUm^=O(W9XI0jfkPja5 zIOR5NFZ)oPC^Ws|RhzsybKt1&C;cMqEWYnOVG@q{Wi)7C$h00^a4B$PReJeF1GqrQ z5f50H<{-(>>|6pzSNw39=42qGHppL3o`l=)tCZ$mwAvdQ8$b7Ozsx3Frbx`S4a+uMk@m;FCaGL1s|VTK_!SrVwA+FJ70RB!F++^ZO3P71xHB zVID)@*jZX)WPi8_LZ-hGN^sXP)lng|oVJq?l!>0qszS~Su5m7|?l|DRu8lEavkqpe z@67@;1);gTtON^WWA79wCu3;7Zcvbamw&LAGrWg%8Dt1#YyoLe?0O6W8R3Xv(pxoo zK!z9yqnv{wY7Im6@m6-(!KcU6AOh+t;4A9WL0g`|`V!@se;)V*BwV5NAb2qCMS1!2 z3z|zO)ue781KFY|U#~boJa;(T5)sth`Tg$u!%{bwIq+%$2!h*r^&!n^J5aJb{>^%J zIRJ-ZO{LnM-yU%m&xenp22Y*+Ed;-QN>>geQ@CJ$qx){|;H!#lNaY%5NK--nmVbAv zFD(LuCJWcg{%DM5?Hqr4^&%YL6tzR?9ruF$g^STrAI)Kwp1$6|_O%zl_>HuX66T8M ztMA<#9DP~wSWWABw(RYOzq%Ee912*|k$7v0fPJKS5XEw1*?g@UlWO@$XBB}Ip8%HN zT0F=8vKXP^NARc(J~~K4DLY?7;-Ho^J5`1%ZWgf zSq#ax&0$Tt_+3G7XKU7I?*IA=yja=BZ)8!?(Y2;4#BPsTB$!2zsRAq?>JDtg6rG?Xr`)iRjEaN z<0w?*Xq5;4j%;#U`tbe8&9*w+`oS};xNg4pyGgli);!~v_1Qb>o~d7Ke0p9{i?W`% zwzngGEdNWx{~vKcaZ!K2!H>#$)$~d4m+TM z)H9*MN;Bnh4V3dYY$SKPvi2ph5T~n)9NQ=NFjdvm(5~IZoRK{E_*ZWT(9+j$nH0fa zMXcWiumRZ21WYy{u?7PC?8Bj0r`nx<|EaH~e_SWwRfN>05E;Gi&)!Mre~1O-(b2&s zOvRDV>#Lkj@e*2AU@zh+Of2kN0+B%y`d}#D>nOEpBAFCq`j4yv58%}#Z|X0#WAPg* zSoB_m%GacNK>-ErRRf1i z6*o6&{4eaQQbEpP_=oHv{VO5BSJ|$;QuyXsGuSW``ftY|7=e?KgvXMcuFZ@5rLl*m zor#cMB$Mwz3=m2_O#aJ`{P7y|Ac+j5;b04t4UpIVXheVHHOpgP)Gw_ye<>ZbRdyGb z>UaMNe(pFCgWN&(cho+TC{S*>z@mYsW<*i3VhQkRV9H4SS@b}I6f|&@gPlHH3zh ze?<@oajN^uWRR!<`C3h8^d=*f90YWnN;nq%BV${O(Pdl~F6XD*Y!Z0D>!ge|y7y;f zt^cE|M*5dM1IpxYD@A6O1_Dg=vI6-;$9a93RS0Ao+fL#JzP1Q^3Zzq57m}Z|>H7JZ zz}N&eRdU5d!u}Zv@$0`_%>eanHjB>}2y9`*t|smfcTeVoHLNhi%s44`DVia-sX2~{ zFi0?^YQ0&jOWv22uYChQ(f(dEuM&6Y-%I4Z7P+*H`D;ml&mvwFki3x{zo4=@Kmw?8 zpmar&(gG$@d(uJI`oPzT=;>>?lTG`Fif*jt%Q~ov$B)bSU0(J!I-cpBgm0>2f08q> z(J;Ibnoi2%IEiRCz)`$!?_rD$oX(um3($RxVUG=lDgm$a5*OSD-0K6OW*11vzdY6H|TAzdH0$xHi z1auds6o2Mez>$PvV-t;fqAci%-^qH2hN*3 z4^U~#z^WL&WvuiD&v=QfeD0xi{x?OW&o4^Mm z+Ys)2e(~!`DbnxxKeTvBlxB}R8ogKVb2d*(_nV+Tf(_&|!wg&8$II;IUpf5z22Q=7 
zZDS)E`VtDIGkEBSN@$=96=L=}18_x+i)-UcY@FS34&Zx+1qCZQu(P|a?g}h4z6C}a zxkAO5Y-l0m-w`5KdvD1`c!!Ne6|X&PfTsd;Du}?iWoM-2lWTT#3E%fbG4EZD+Qh>^ zq$V>99=iX!Am8bk*|oHVDXsx)I89E!XdK^XuY-9_4~WzTD?YFoY1F#1dBcMk%X6Dtsd*Pe;ioBTpNvP>J?QVk5gCF~8ve3Z!5ckuZ`^*z z-M%m0M^e^q8Qd&~oBc8XOz;5cfFSyHfkncLRINd}L>X08>>$&te#=CrTTiJqjFsdF zDsNcS0|SY&u;ZvLNIvFC$ZT)zWRRxoK=U6^Oyfly>>Y)0hES1OjPdM;Us71@G$2+B zk|$3~OUiBvsyw_}N=UwdFqN>}@Jr9;up5jeV^x^-Sw;%V&}t@UD;{-YH089m8%g9L zJKIvTxEq(jm0gmmrKS&xGE#MZ&G@kv?Rs8j)G`hOat+Ybo zCVX{K9D`8zcW}e0xEc;$q|elIB3t{8%AHhaP(z+0V9i*=NR~Fm$pU4FWJyM*K z`GgGc;OT~wG}7l78(ewyy+V<`80Z&Z{c|9IAfG1xY>SoyLz2}*Yrn%yt%{e-h3$kJ zxJp!%KCIr_Z$M*|o{Vv*(oJ}+&X*`g0D>ZPNQ2&^s8*S|;d#fdx>off5!q>mh5&P4 z{T%KHas1m>uI$USk08@%hjq)OZA$*7 z*ER`lQ~}-K(+EfZ7m%q~41eS`5W3I)`Ne}@iWx?%#0nt(g0#STvz0C{c-$d_25=O3 zkC7Jnfr0Oi_BRZYk29%rXazQ9Ca|$=wZx#;4VFIMG`Fp13K#fiz;cZ=&qD}OaV*2i_rI-S+@me)RRt>Hb$i%10g0wrdVmTd zbge}B4a#u+aFJZKx2wxF04ns)&qLg9!>KtF&cY5*)#b2ma=Xj!A4eyF*rZfk`$HU%_aKnACIoF&cuIn0Otu-E(ik+*N02SrN&!?Hsz)k1v=6=AP zxCYGNb}5$2N6W;IlrU3AZ$W2^nl;bj#?Qhdjh9akLaWH4u@&!m+t(;tNCno1b6*`P znY{0UIUXWxCicFrn3kGQluKe)Sl^>a1Z|Llwl9uliX`&&lpziKaOjMIy92Gxr6r!+ z;0fx+KtBGT?YwDl`<8pq%UdjWF2O6~!rwNlf4aJ!Ls9HkrHpeE++}IfYvcz!t+4=Y zw}62Zq~V6-mqOw{pruQZ$(fZZ=mdp|HJWJa>;K#LMdS?~oX{TrpDxW8;>H5A0F|H} zf^XbXPZ<{!8LLMlR_~ooT^Km`AHs6m6?yTR&Ic;(#oTC zbm~9?u4A+I4;(tVuf`c-hEHQ?K{&|gb>-~eOJC6{6=N-(w?V9FMzjc2+iq8_6T0HIGEVgYMzgM zaP5rxQx&e0E5UV2D(J4?v;t1{{iM_)y^8&HDG;t*nxIj2es)}x`?B{6>kF%RAQiJZ z9!9QcD~M{()#Q&UV)L8d-+4lp%d^)ny})1LrceNm>yMWFWowR(0hZig#)KkweklS$ zCBN8(U=nq{EB@Sq7;6A(4>a8tOA5?})a zz1h#-?hWWnGnCywE^t1UR-1ONS{+@E7>XdsDQjttA=yL%hKX^pYt#f8yXDoAvJr!F;t%7{^LBj zq~x@Iy%}@&Bf@bEnZnpdqI~kzgfu>W7}ue_f`M$sWzQ~K-EcM&Yhse!uo}c33)nB& z`Ej`45a{dg&%PLt>|d-?T_4k$ZQ+;fBKn8w-1mFk^jCWS#2tE63hTLIBevt+~x?Lvl(eHln5IHI)~(IE5jc-YkGu*L@OrbyHk&tgVqatg;QcCmDM z6Km#s91^v>X?TvXO;R4umQ0PMBnom25eKlXE$i&s{<4tkZn&a|`2S9t-Os&f*6Iv5 ziAMx6$Vo|k>mTq-Z_jHK)vc~QFh^$0ZX&7nBf7qDas*!kL z-n26vXJg&GALj2z{1gMh=OjODd9ks5zNSB&HeWiLIJBIt#V;E7t$l^ActCH)qx)4% 
zj|2^Z{S!oVUCBnZqmtZBF{NZ>DKN7&f<~)WH}hgov5$Y0r5a&>yY+Uw$;W)Ru*+s2 zHUSGE5Ib|Yg+cDPlXd6EYJ-oP%eo7UM|Q!AZ|wOl_OgTHn2Rg^i)@^Vvgut+ll6$> zBx3^+zEztt2g=rHZkhHF%!jAfUz`oNpcmwi>E>!JTBu)u4r%2yBi^U(j2nQXXXUg3h1Z$~q&y~m9cBIEyips?HfsPA2MeO=WEY6Ir- zu!e?$npt!>UEyQbtj(@_lbzQ}vQG%?E=sTbb-J=I!fU^c@*P_t>XZ_DkI0W)Z7CS^%w><_SZ=yJVhR(s$ytxN z zTz4f^VUU9P0f%?pgn5ZX*Km-J)Fsuq=c>zk=OYAWOK5CwiU)Lvu5k7(F2{;seCh6e z_EUv6a=)qmQPQY|RKuLM?#iN!qSZTl6{gtus~L|vEs9#qSdo7RD34agWVDKB9~aS^ ztL{I?n=kMEi;dChDuTpbPLST!*)=b7VI31$KYv`ib5opPO;w?$_?z@aragr02>}~nl1T6o6;JS$S7zaxun2A;1DwpHce&Y4(ILT7K zXuX0{!I>?`*cW#Gk8AkbY8D+UDra5O(}&cED&g9MT)Yq0oXv30muRb1SWZ~K*e-%8F(%|p(;~^P0$?i} zKaWF>-~CxPZn!QhaIvoay48KsEYxDSD9>2MA3v{l^>R96)0Q^P8J_29Vyem_1Sb73 zI=)#WEj4BBx3zBd9^ayn4&Q+b$%2rn@or#!fY%JBINmQ!CjweQ)Aw@|)USfex<>oN zmPEi;!HxnR$*>8X_4?DzTBvL7eP|lgQS|e}2acdn(yqn$gD$t7ptjMM%?~hnWiazN zuADI_^Qxw@BPAu3!-fd)rs?Zsbuiea`sJ~!0}$fgmH!Z&K6)!m2xq#gL+Bz3Ug~?7 z1x(5NNQSova+O>gV=^Q+9})1Z_66_o_Pj?x3v}G*ED|DB+Uvi_;laj-}FJhR=NNmX-4hi zl9b)`CJR4lSqg!L=|p(VnV6=8j9lB35L*fG_pgB4sJnY6+B5~Q+lw!j9FjkYDTUqdBRzMFgOOc<-jjT28Dr3~fta;t zTrUToCxtzga5>H!G(wQZ8^fsUiRsY=*^e0G&-D8ERBWJT$JJb3^>2ahlpF-B+TDAej=*?~K2fMqhmKEHZj_@uK!#st19X35Hg zHX=(QsC~$BW(`|(|i0B|4)su2c zPD-X^>iF^>IB{(`sNJ$sVwQse-yTH$CXeIx60d(WAbjM&WP#g#?&uITbXkfmSc<{N zhh%1$;vb1_4972_ivd$({&bU|%=1WD6SqukC~8B{s^u%>d(S39V#4HU3m^NXqbmJh zyNPtOE9HKWN$y9)hG>>d?Yw(ZtLrPKR|5Cz@}r-8UfRM|eCGEhq-NxEO3Mr!HgOW8 zCa-{myW`gL_3gzQitkv*oOh>wP`@f2=3V4^a1TrFWAaLM$UE*)U-m!*wFOxUvWRBv z;VhkAA`|(^l*bk2B?wGYVyGi7%dlUKciOd1#>d6k=Gn2R)bC4pI)v$HJV-j6T0`37 zvLdIFK3CMQ#Ac~?+b!E?*^lg+VYwhK{C^JIMKFXVIWX`XDk_fmqHqYjL>%qA0rNmy zsjqM3=(`vw=*Outp_4@x*#VZ2?EjV3@J^gYRzs`hl^&Q%)HDlA2*m)SA`C?X5Wtr#{mPWsN`fDD>V@R z;@7fH2ACvi%bwGMr^MK1fD|D=9ud?(2(Y(Wn$}>!*NO^%37=!n73Q0E&ZztqD0B9n zL{qB==GUJAE)Z}q6F93c^tZ?Oum@IF2WTkM;r3?Ak;F2D>v*42iQ5o*k?wdZsB`aFJ6i)oNuq*5wK* zZCXtJ$fz4!T(&VPy1KZReXBHFs3|*Or8Yw*AR$f=V<%U{D*#UHiADXuZlBG#*QS@m z!bSl5diLm)fsnWkK!R0+sNDwZZ5H8g@y*>=mt2uTN%@|XK2jDTQ<~KS&wQ7ubEDCvnFCod7k;~G1GI6ShmX>LYl1X 
z6A})k8YGXEWmSDe>dLrFM?Oz(y;%I3T8RbLQ8VDQcVxsVfD8cc-j9zBf#3gF$n%&X zz8muTrd~vxcMJky&{ry}ccO&Hoo=!kZrbwxySd;Qvxde;tD0^Htv;A~V!CAB5d7g_ zaHlr^=qy6v$k-h5!#{oh2dTBRb-7G0G@OZjUO~pr6{=1mA+TL4L+v*_2<$*Kt#Z`t(sV| zXW4)kH(~llLdh}U+(x!2;^5FSyS*JX2j}98;~%4*gxKu{e$a{+k`NS~mZ+m8Z>wr* z-NqX0_F@`nW~jssYDjiZ5X9(@jAVMG@2#aYt0|8u)A}U!>-+QncFRzp}EM*m!i6s za~@FBoY{QX?a00Tr)oP-O>0U>>lB}h@VTrB}15@#m2aQtA7xYJ>d3qqRvR_^HAfb~G~jdfew zx>T-Dw-oAt6`Z>FxB|TSCEY0?3TiQ*9oD}Zj96mYE_)@F*0V!UYoy&-sMG+v^Hg@jRJ|G5A!AYpW*n1tU4* z8jMXvKE;yiwR0taWU{_%FspEj_O6{xvka>?tG0Ky@|JzD8z}n=W9Eo0wjGdyeVgV? z1sN}Cm%BDt_J?!TD6lZPz^Z&@(ko@!gq1;PXybaYQR?Bd@Ickj?cZOnhKZw7&QKb{ zvYq|5f4|D!Nl8>z_Dn5UC2NGj5-)q;z><@I!O?yA(Hn9LfGVSYwHkVPCZ3KY!LzsGQ4DQpcBX*YC`}{*#YU z`zG=JvXR`uM#;<~1|ypJn;(2_w6=NSL2fG$7Wdr5cvAIp!kX~nRfho4@!Wy&SC;xk51cAm*<#h4edjnWfvZQrS zcg=Q2%idhT!}O*eK`Pw-c>L^w^K%W0^A7N5YAE6imMB1!;^5g5LdM4w!IHZq$K0dd zHuXmt>)ksM#A{3S!im=l+Jj>6;8TV@{8 zhc;d5R!~kOzx?KmXnea_T=(2veHO&Kb^1B8g2a$_Kp#9|g=lT9qcwn8;kvuKc%gQ7 zhalp)9pk>Dx=g-U=~Jol#q&nC#)5NVsb|yffnL?_@3{Va6Wn6!q2dSCCISayq#>hW zl%vqQVY}GRC+@L#cS-f`#;(ngwZ0j~B@iA6X|Lp$EY4QwMffu{ffJXNi>B(`bGux% z7bUqCbBaA3+uJ;6Vq(JfV8<>pIy#f?G-(?tx%a9va`|l*qA1Wv<3!oM`?Q;y!Es+; z#L2lBO)fuezQ40OHSYVc|+Hrmej za1_bFCKd$HdzV}Ik*b330bXkhx&fqm0%F3;-#pN`05Ou7t&O68_^RFUa?aMNQX6)Z zNnecyoA&n$JS=YYl8_CW+t{y!NBf|>uW$@TKn%^z{kw(8)aoHj?u;GQp(C{R;vhAd zkpf7~W)=g_H;>Z>9kR}8dp-wqpA;18ApfCKL)VJO$N>h-gjaBWYpBH2gcZ(8_s?7Q zviMKJ1ZZRg@>-ard->e_!a`IBWzZ>jsQD#%Sy=%gc&>WxR`oe2PK?mdvhgoh)g)B) z?Jyh4?N^%{3i>sj6sN!S*xeM*aKgg?{F&;6N8vl~>FL8FRpd&iX z+h=A<-ZiNnc>Y%(GJ^2MToLJ?uU4|I;q8C35!;(Gve%*^*K?EeU?B038tm+qe99d% zNTM5Qb3e3@nyvU#2x4SOgd_pZ;^+|Jrk96^`)B)$1F3ScC5BVC)fQhoC&LH~IB9iLae8l5 zubd2qGHe;mbzjQxbRO4Fm76!hC*h`g(|Ssc!Xwqb@IIHs88s5pu3vzmFZt9jiFqN& zH?IYt>+y16BrpANur4p|uGU`rH8+oUKWn+t>#(t{MR27V3w;)+66@@j-j^zu6MUN{ z)#is+K;+r$#c^6-cdnJ4%%fO*t)c3-{b{MT*sS(ju)%tRp!Zj~JB&+J3G$1YvbcAP#yGKI81OIPdK6-V3i84;t9HeiYeA1l%X-yrkf~Je+|1@MQf=fSS3Wb-|pSgV&w8ZN!aQKG}9u63N{-I%f9+MjN&DQ;y 
zmb`+_w3n3?6WHx~kLpnQZW25uA51pKc?->qz;#m*gWegW^6^>A;#;kkKlqZ=3!`^J`VhvBK ze0U>r<%+mKI>8aAa=hFm&tuMfy_H3E&+6cF9qikvCh<~;b>u6^4Naw-)>pPJ3g{i+ z1N0^V*%J_Ki-*KQMX1bK@+&x?dC;sX)j&En`aXTVE|_RqeCV(VFwojSQO8caOjjCc zjQd>JVCbNHVzWlzHytIeTJT1-x)-a&ZW|nl>rr=J2h8K+QucF(;~3IOr%epIhbzB8 zl9f%_4ml7C9C0>QWgm4(>+8ThfSh4%T|s>av@%qC1R3^%HWqPFt&vdF-x4SYnZ)a3`y(ZMxb!+yRn)pru~!lfC)9zWUe zg95q%P6wFyRlAF)^_xEu29**L@QGXmQ`gR>ln;B)2Wg5cSUYb!UvW=_o$XtIfr4Xun`RY9{T)8)I`ch@GdM_oS zc~O?4Hb;`SzX@b5jhIc!G*{9|lN2BTPPLji(boL}hvM5R6_o4|$FBC&VI@>;vc>>L zF(OZm2+=mR-j1w2-la~RsQ}iy-0lRlZ4DeS@!7uaH-&l!ODq@SRkrOX0La=zZ~`Sj zCHD7ciroHn43IWe&q(93cKqX{Y@A(dsU+5iT2|gdSWi%dwV+AMdMi{po^WRie?fS(6j14e(UOQu1cG$%qEJ{ukPyk4kfb5;ryfO$&p%@cm`bQh}G_8&ixa& zdMEjZ3(d{eD0v`f{;5G`2Q9kZGb-sUQ(J2bfLFm(*u+pgqT>gWDmMD{I( zGgp6cG-ItstI%Q*rTh8eM5Qf${lGe-+8h*tLGH5`^OR`{XD|b2SpC|eK@}6n?Axh( zaTrN!#$$8Z(O8Q zeXnmRQ6~^Dm8y5!*8dm!e4&vyAq}KR9T7dtXUpZL1gDx zfFY1VPK;ZhK~*akiLvNO6gy4miR_GUT6Vdt5o|@%spYK1f8jC^S;IjYGhm9lIYMU- zHL%u4xoSZH0R5vSv9^SKvJ>78X-Cy=o~zZ%d?XL|B(wp3C~o(v9j%bud?j{fX}mz1 zkHlT;m66$d{)g$mY-!2UYIixUg}wH$KZN^kd5R4 zGHi@d#a*ICuk9NsemvbWKFMHg{k1Q$^ zxB)y>```KiK*pT|&s)v_7_4`oVbKgmas zkpg*7m`|-Nag82m@=T`(XSUruo+58g6o?w*{4t+xVkpy)s&y)BoPXJ1tR`{gFINJU z6nBBA2O@_pz|5+6s@>K!KU7~E|*3e@c4xA&c$ zoxbm1J$IYANm)SWWNEIGgecW~{dpeop43<XQ*N1@sAr2xtN9xOV85_VX1!hpg z^S{-`jbmnyVq*asP~U3ovd#jg0R@u4MYk11Yq)Fx12hn2w>Hp)XT3Uu_)2J7HCc^& zy%bLHhzWo-K5+s6wR|?+J#Fx%6+lpPIfOj007##7Wi# zh;1YH>n#L$7x4}%D^8X5tw#lP+VC;5DwEqUBCAm7m@Sah0N`p+S8ReOLAW@0m-Hhr+5DcJ~_{oygk8XjT@ zf+DDLIEKk{JCKE5opQMX9`XG7wS6@WyRSB*YwKiCIU6fjI+(k3(3=p>VUoD4FNF_< zwR@gIXQ;*0zWm>-la8;BmqRW5CS>|8q#n8k+`$oWea-Xc7*qsxeG@qUuYH0hpd+GK zY=6Me%f*}ouvF3-IG4LEaZ$&JCt4!3>VNAC;1sQ33g1=p?d6!eU&W7KQ%`=-vL1j@ zj=~pXwB8DfiIxy^A2-(mAZn>x?WrqZ30>__G(cC2%tQ2`%%)UJcWg@7# zaExZah=8VHiuj;lU;i3de<~=R{X7Vzbb~K)ITBBlHEs+(yY4p_u5XTA-W0fqK}do) zkD^ULpBn>gR#%fi9tDM|&aU9pUN+80UDhqF&-tP(!)CnhBf6&yjS)RNKxPv7a&00eX<|Ur$-!94bM}7Cq5a0^l4!Gs zl6%}Dp4V^z*e!}V2&`7#3PtyvVo`*Ick4wI`R1O=%ja>+PXzPH#Ih6*j#a$ 
z1nh6u6FqZFFaa=q>7JIn_Isyamiqt|lg@U`_x$0Y^(z+}>Uvt#yzi}tS z8K7AB5lJzXVRi3BUk@!gEiPjrGuB&ny6`xsX}4?z1(~lcvUoZK!uMR6$8>^sU&XWQ zQac}#Sfa{~Y@vYCWvp<^`83XBjHJjjgrJW1$%^T$BZGM<$n=Cl^^s4sLbvIU5PEobO=D(H?VU;iWCiO?Wr$1~Q|*7i@9;x8Y2s5h|M1VJtKJ;(CdTf{Y1CTxL! zvBGu3{QIh#&MO&ojfGQbhtPd6(cD?{O6Bd(Ud-P zb+X8~5~uB1i>r2yAiLvcU1*|Y@44c~xX8n@u4X>$OXXp=!y2394<~TEH+~aezr=Bx zWox;8tToWT$A10BO097|d!iO5aPgBKg*Q!}xlUBw;jP|N(iVC6HE`m#hXZ59t3`@a z)%Tu*H&emr?@CXDK-xh$b6S_lp~77fS{GjDG1(dC9KIprJD6r18P-YSx`d-hkd$j+ zEd`7QCa_kY^@$DAGk}m$AQ)e5DttchM zv!W97jhk77KuguOLd&t(Fe6M1pmG8o2yuK)$v5*@uM%o!Yy4|U&l$(!I7ab@SPc!6 z3BJLOLa=8V|5#7m>Q&KUS+Q#g3XEM^7FL?<=$%9;hN$V=ea+o1G3@Y!Xi@`(aR6OL zHUssp42OlJP_Xufh#XlRb$lQJav}tuuasB-nEr5ffWHzzo%mgjc;+W5u0-eMTeRpO zg7yKaj}MoucMT7OlvK97004zHx#ZierE~GDaSq3vcLVfX(|+AQt-bw{Q?hM6nyQmo zjQ86a4C@-XxC~;ZN^C%yg`D+=!z?UYLFvX4#R3&t@BoeqXtM#Fjb36_&gvZwghC~e zQ)!-A7>S2_65wfkg7X@`=M6xSMR_o!=3zZ@pLM8gL;C=`O3$=#MISff4WtmQLKuQv z19gJ?8&)4j-U=(#^b0{PaI;+kBI4w@Pk<1^2wLsJYS%6#0jDiTRlpP{lEObMY-}kN z73WtfHbMs9lXRWeD|5ha3Zbj(PE+Y6lDnFC{Co8GCZQB~0OWyy%7@Q1OIeaXEERc$m@LIxio#M%T_cD7(JvwYV3uaU?1K zRD-ql-(G;^xxdxttgmEhQ!f3^9W&K_fEmmc3beBe25$(Qjc9(6;&uG~FJc0D*N8tR zvIBibbIZ4HQgA&)jR&ZjeK~qQ@^y{Z0F7OKcgo*CnhQSn`7{5`aO`;HnSu_BqOa5T zM%&B7>a6%geli?{Kjs`d7dHJvLp8U=;*|*q2&8iR>Za{$$8Dv;nWujL-W%rWyjrBq zZU5wnM-E2>IR*4jb`jwte}XavGT#V>mNM9|g8i%cQ@*`6Ckm z2e+fU%w@H-sG)=lj~4gX(#qDD-F%wptM(wA`2r75N3Iv6gao}{oD#LS4j1KJxa>P8 zhv?zMhqk4x8aK|$k%Ii-=;$b|R{+13Pf;5%g^+Uh?$i_AM~^t*fbaVjJoe6B;^e## znB0;7oG~dUM|t-#tir&+z-#8_8CX%=xHd{&jQ1HJ$vccJG#Surm`S|#XD4v?tUkwA zeOvXYyoQXCN&9o5Ha9;%e`MMS|L$Cw^R*8pi;GsTUcE|6OT&c+-oyW)(Gd>(@#9C& zrZ>^iU#$e|CfCNgQf_?v_RYJ&cx{gFTWf3Gw{Lh>f^qgracwwnT76;Jr-jRhLz0u3 z9z1w}zPZJ2r1^y+g`9t~KU^lhr(2Mg*2MNK?>-kg+Yva`*UTJa+wbSl0)mho!0y2F zY@9@OXjnTjBvbyFF$YnP1kYBiCo5^o{Au0zmHNa%4HN1X>e||;;b*_Pap#bOdWN?* zMW;##*RSbaJ_qN;7|4Vzt=b;ZYFjaS`r8b|nQ4V!Mm$d&UQkmlEG&{>A-9^F2A7m=?IUF#D&n1~MJh?wvV-+S<@7uv5Wpicdny{peYX_a}{o$f9lS?4L8u zm95iXhs&KWD3Iac;9!oJWIpxMa(8#{>~j!4k8wLfku{=2-9sUK%)a#XyLXFkB2hGo 
zMAm;b7nm39#_v@YdFXe70hsm$ZAMFUp-m2N_Cg~{4% z*TpZom#Zyt#JDrW&EL2YV;;J&@g}o>opVSAqd0r^EV}c(7IIEbTc4`b1>)}GE#~}n zPEf>k@m~2S2Z{H;p?#8e={PJj>{v5r;0^AOdKA)DZ=7EkIOXeh>rUY8<-W~dHKnFh>?bC-yQ;B{7r6 zFIM8Xt=j0o+3Q`&!r6aFxDLds90l1hXiq^?4RjcmG4>tLYaU@+l2@g=qYkJ>6{b0} z>B%`(sm+_%pQS<}cTlu;^{<ZGPjKg@qI_3v>-LGNj$zD}GGrXlgb+J>&(B zo#f`2_19i)rxn~S?>#?Zx^Ue6Q_$GNzo7c~aZX3i`wy4@>GpKx#sE*c$&;((_cIII zA6T&(RczbckASvn4T*sxL!T6d(O*Ug6gIQkO$8cy{GU4kFxyqUTnk|q1U&O1?F#WN zx4+aqwnrdQg8x4agQ&Uy5&*j~mGj~21(eWppZ!}c!A)zZ>t5y=iMo436Hir93)8Pi2oi6;%T1a(V3&)hnI?EFc1LRF90Fsv$9wZ9$tx8k-hz9 zbG>7=+L1?w1RMFw%gG)q)j+EnU=`iGLs^Pl`mN!*z@6MnfBBa$Dg#nHB4RjxeL{j{ zWxE1VRp5V|F>LVfR))XA!>2c1IOW4R|8~p%t?fd$251s+Tpy@8N)$lL)n9p?wxkjj zIRdFpkT|4mo_kiYS2^?x-03;q$rUjpKlg-Z2 z3>mvjv*ho|5@{yXmuRq@GaigFt&4XhYEA1o5fVhJsOj`}1FEMhy>*e_@T#EPB5c#h_qjv-Rj~{wo(fgJ9D; zS_%ENva-@>CoFK2e`KrE&(Mx3lWNVAB?gr0qmgy*U(JXSZ^yL789hEJ+WUuoslEFD zeCPc_y}tc3adR~B{f~)bp#Ja!z{-C&e$h!RHV(E#ZAw_nj*2o!?qA9PMNo!M)d&I= z+zDk(zm6kl(-{>+&~`casiPPe0Q_msdSPWF{-L>xd8dsI`*vw}FX^g8M9C>P23MV= zee4Y=sk3QUOoO#-!NK!vh>yHTSA+nL<3MjL$Sl9_7O( zb%}p*SxcUw9o>03Xp}xLWKz9xV7lm>;2c`Df|2BY)YiWu-e!GVXES8e=sANi_UQ3G zgT#=2vwQExa@Y1@ab@^!W=Apl?h@|EeFV=1YW&YDLo2vDJ)ZA|JX)nf0!i3`t!eCEQ2wY0QEb^d&PsOO)HDE#2yz;|_Zg)aA}mGo*d&=9w1p zQCdVoLI@Tv(O6?JEfo#Tb#-;}s;#124!i!PE!S!`5To2tf<)TjHdYP22Y)s<$A6VR z;iV!7qVbbYc~)yEWo^yP2vm=4!VM?M8 zQmRXWZPa)OsOZ9Q_q6@{^mKC#6~|N3QjZ=z`Zg1m3wK_9U}knvP>>R+WqNitVbHVV z2U=QMx0qJJ?MWAYX3Hok1rHiQ65ItHn6$xM08^@lEz6!fT|T9@$g=NXs};*-m!>u| zCkJGBd~Vg)bFh!FD}cJulcRz4vLBPw($JuUS=vt2Ec4vH@ScX|0H`Pt0!a zc|R0~4WkLhIf34LQ5enmh?!*o!TL~wnDcTHOw8BU*TRqDGrHJYOG7(NUI#a#u^~nF zqtm)UU!!ulxzuTi2HG?}8+cx1d~(8EJLeo)iud-OPEAj9TpfzI?E#o4+^VOqFXi*+ zA2YEL9dO$gVyXgW1$Iu(xHaLd-awSa!GiGc@Qm4Aesh~6AZG4qSPM`y7Rl3?-Ime|Xa3k|kk);HgQMB3`ftWQCcl`78ngC5O|7v7M6f=G({WVPF&!0avrUe``=gyw3eZY@`Pax~7F5-a9*)LsM z4{$a#L|`P~GQh@X=jNaz>^Xa&L{n5$6!2@}YuB#9`>Cj@;rG{_oQgZ_VEtR3-yPYG zch~&@#90RL{H`(RWMZ!b%lps@Jx+q>6~SBKFKMb zIqtufhs&d^!Q$u<`na}KtB8(miFT+}fwXW95r?_L2PQGbo*2M4YHI2=m{F)kR{U1g 
z$TlAg1iuBV#-}Pd@R;70&jbSf6xyW3!SxdI^YdAD)pc|hHq2-OF6&p!?luZly+^aX zz-<6MqzWY1i4q8G;;UB#^-WEj`@j@9A}GjO6RuIeB1p)C|1 zf#k>F`DOOW9#87muh)Bi{!IS#=~de_z^=gDbULKV3M$x^eL2$M%z%W2&l8;&cTpX% z5uj1Kdlx&a_p~&LVabpO8C*YcCR-!x6DOlW_&WKka_5yjU6s^ci^K5K*&4LGo-5d< zu<uKG!<^oJYLP8U`$0+pA&JO-&I;wh>lJZh_t3{Ev zU@AFjSMmfc5_of@`x#E`$Q96FV7*%yhsJGp2tI=(fJU}=?+8-5{L9KjK?d1jChfKp zjra69EhHMS zNf~_$QE6KdBIk*iSy)cFuS*VZE&~_-V{8Hq9~_*YwX*^9e*q`wqP#83EzYWN!Yd;p zd?MIXGU?irv$C`0?Ckj9w16Cv{}`o)^ge*Dzd#EX!j{dW4A>7buV642){GR zBLt7GiQ?FHEY)q1#4)&vbMIi@FUZ>qpl7qn{KPyNB@6{y>OMM=`(zUJJ9zQ!oCh=Yl!qI zkXj($S@@A_3=R$DTMr5LTNURe&_n_xQF~y5ieQTcdMV1i|ekZ0f3KsAzbzT{d6aG(mF$45P3o7LhJd- zEH6*qkk#9=o=8GXfK%VlkWT=x={FrZA6vKwI@fWG@6u2ST2N1RZ2|yI`HopZcgK?Q z^XJdDOM@r!!#{^P-G0bW26HTu`Onbopw1v*ba#Ya<2A|yQHxbMnd|B!r`f60M+)J0 zLtcPjO$H>sFzHL|(qg_9EanMl(^du87ZVc$y`=d9G=!s|4RlrP1rB1UI_%Uj9C>>k z_IXWrkFnS9ZI)`4J8{2LW;-5+Q*tdh>4hMA9$kf^wBw_dW4Q9@!*fvLjW&L_0YZ8? z_+IuypS~L*)GFX|friB6-NeZ{V#)fkF^E2Q#m>VtPgP+g$*rjz|535<=HX_qSJoIyQic*-P{c77OS(_h}-k_u8PKaVMn3g9{<>XV) zmyZ}48cGdIF;T(h{9Q@^uN0%Pa3Iv#WzSrO=*-Q{4TIQ$`G!HZM$=(oJ;3_pQ)c`z zor8nW=}wyV@uM8adl)a42i=TXh-gp)NGd7OTvBC{2MGM@u@E39sE@!5>rS>@BTLTM zn2CmlriX~dxUi(;;gf)N8JMe@3$40GB01}xnEc~y> zCGhv`+FEG=LBX#N_)-0Wlw0i|4bGqaDa7dqrl!fo#TrYhU+;K&O3+Jsf)I%IzWYbB zLwPw_?w+1rFbtrKPb1~#CZ?^et*WP&?Zgg?^I{EzC2mo;_K@LtjltZ~Qe7-E@3ZTM z>0fOUZN(<#HbPaDpmyW@q{_lxrRbRBEaF=@6Ck}wMgQ95X2h84dn%38wg#QbU%{K&CR7`WUxT=7TIBAN|r+JkLYWBJr3Io&*y`TjjdLqkJ)R+dkf`H5!)@}nA49&9X#vt2zsFFt+ZDyq{W0lWLw z(Q!jg?v&ZV1tz9@Xvfu8AOX+1%iA)V+K6;4Mv%aL_@SdQnNwmk#0@gj6a5 z8~{%82cj5{OK5j@chLe9AOp}Wdx!u~CsTp{tM&86C<5TEt#sSYgV_9MYYWch)8^Ke zF*vqu`7jiy&L5DIhRvf=$$hAD0XX5Q+F^ny-Pzd*=X1f*+`Q&@TipBZxiMQ|;UQv> zs7^ubp(vhe_)0`C?msbMjD}?&A0G?9t0`b|aL4kbFJGvj5~Pt873BU8#gH`u@wMMd zkbsDY>FU+9utG*Lc<>?_faq%UU1HYFq zg;!zY{A(Ub1+XV;K_N|3a}q)ng80;W!a{~!oT46pwt`kQoW9jehJ2cNEsVXyb7p=fOJQ@2OM~0L?4{r z#MYJs_&6|apdq|8!B)#GFcUOTL^Ff_{-9>u7F(Q^gF`_92L~4y)B%!JVJK6jbm2*p z2uA@AmJ>(U{NAFITa6dan0jMfI6cVBnP@0^9+p^P_TYh{&ZSBF(nuQ5V0m&Um4V91 
z?uE@uH)J5-2%!QZ-kC&O`&xykfR><0!6@U|+T$gKf z;(GG*2e-qq!LG}P>77)LFZx15KNJ`aT4G_nSxq1wWsZ$`c`9v3L(upJ^o>c|0ktL4 zjG4dg$P!uW_3$AJ0<{gMsY6IjkQ*gf_#mvyr`QeZ`#{RQpRECT6`~QaBDWS3UV_)E& zriR8P!ae}|00-~3gU75-USQH@M8pwsAYnkF!~tv2Lx~q-tjk;=0lt#ji5^}2! zhQeTCO=88(lE5U0TzD$=SN@VR5ee8Ck;Vu7z7#=irW?pARA*Jd&}yDQR)VFkU)dfz zOzB%lI~*n>Rr_O%f}YbWY{v-Yr^P0VqU5qN8bno76F0O8E40Z6$|D>$r$Lzhq>aiR}EZD}n?&Qk`2SR@NC& z%3Z>myQ9*cLrL9pn2%>7ydWduL!9Y)&Uxw5orIj$&230Q(6zYXnG2K=Mowg#8iC9> zD!6Oy-DOVj#c)9J$%2YwLYt{io9P)B6{P!e2oB?z^J_O8;ROgTZIqzpn%9=O#NO!pg0(`xz3_xZ<0~sR#KEU!4Dg~#cYamEtAb?Ic z%AQ&~Iu`xtjEjtX0cM#3dng!32!@A|)~I;v7N)l=kHRHxZr|nycUlsiRz?Wnp>a?O z4{fR|B0f6aTHd~?_Aq#I$0l>xtsl?uA_@GDiVd?74+nne43>H&6^y(>STP6i76ZKZ zjd^tMzLN|QOSs}Gc~3pbxDAT#6Q;_0$3IB`rut(GfH;eL)I&PWK6l%pn$D?3Ao8Jh6tO}7J$QPQ94S{%N zmczMDhUH>)?zRm1ADnRTi-$^W)GvzF_J6m__|ZAEe!fc~=TT-`@UCu8GSk}B2(*=U z<9Auw%750^?eqCDkWq3B#5?PBooc1NN_YsFb5_{KCAcai#$5P(CUnVu=5iE`uly|D zk<~iCy-yP?ugygX*R;($l?P6KsTx^~94gjhW4fVsx9=)VaL>y2g4EPS_tD%T&8QOn z+^aE)e>P@R8tilps%4j@&mK&<<~%OR@~l3dLr3l@B4$EXJ?6Y{wOJI_=RJm&iVL~2 zBJY;vlsl(uIM&?T$OXC$1uO~wtc+c^jSjna_NBVfTG3JAw}y&CiB_~Pzj5>PRJa8F zBBeu1`q%Ild)}L9MHdx+gwFxi!hKzuqa)cZj`yj}vd@RHKMNP}7Ic276>UIbhSUsX zR&IRk)^Vi26liSYb*%2TS(34SM!ci^oBl!Kqt;T#!=zwxy!TX=5=8SbTfa5!lWs*VL> z_f&rByb+a!U|jQOE;#CvlbCghj#8RBwaNV65VWj_gruywKtD*o9v-sl)fWN{stmco z532i|%DJtFF0+7to!9I5{E5Ezv3{=nqRO=yVP(sqOhE>T4Gq^`$0oZF(~Gfv*E)vw z^;X_tm{*xaKfE!md4(!xp*Y$xQx8GV*vIfuR0#51VLP)+LK-99yc_Y=lJnLy1UP0z zO`kvgT+a;KW)IZv+w(JGZ~YuOv#V8r+%t9{!Oj*Tu3E|Jo?D#O6ClL#37Td^kg9Xx z-Wa;SGCqvJXRiNPpmXZj04vchSrvB-UoMRwSNHR|$F^6ckvQWzXeGEQ<{`WJ*xA|H zQP0owU`K7#jq1a%d4*QQyMz?n{B35$VNB)b=!R8qsRyvPGN()#PlYz3L$`Ihc6=mr zIAB89^GRG!vG)RJ6`aXv0K|v}0vxRDzJ-X3KmA_uL5N)UFi=rZS)nh=5v_=R7(e{8 z!@Ql9%H*i!PTc%(YWG~+sBgOjGIkO5GsPzY;vthMbdE%jBhSjc7Y?}^ue`tym@2E) zKU3?urK5Ws-BIzhP}G|@Z&LOpwf*>B9`0ix=j6zEy@N7{-i6;~c;^_%C!f-FGklAv zE%T^cKUYPfT0fUX&pF;Lw=d#D1b$WTg$F-e@xR?mJ1!cCJPF04v2f3-8V-nRmoo=T zs@P9&?jQanvQLeheRLij8}nUzb=I8S 
z0@rAoLnrSadK%cB&tgBlTV?P@C)lZDW+R9TBhzUe;k81A&%halx8erzAW^8_JG3#tv`uo@Q zFFt+@ER;DdvDm~nblTJ1Objpm7d)z*2ecx`?Dp=m`ug=2efC!^`40!odtQ6899Yu# z?cStwI@_G-!1vUimDc}V15cF3R?hqWuCFjqM61i`*o{<5@Zp)qamx1A&8@O_~BLGajSZa_z;**TlY3{C{VF^;^>FVdQ&MBb@02Hxu Ah5!Hn diff --git a/dev/kuadrant-operator/doc/proposals/rlp-target-gateway-resource/index.html b/dev/kuadrant-operator/doc/proposals/rlp-target-gateway-resource/index.html deleted file mode 100644 index 8b426833..00000000 --- a/dev/kuadrant-operator/doc/proposals/rlp-target-gateway-resource/index.html +++ /dev/null @@ -1,1781 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - RLP can target a Gateway resource - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

RLP can target a Gateway resource

-

Previous version: https://hackmd.io/IKEYD6NrSzuGQG1nVhwbcw

-

Based on: https://hackmd.io/_1k6eLCNR2eb9RoSzOZetg

-

Introduction

-

The current RateLimitPolicy CRD already implements a targetRef with a reference to Gateway API's HTTPRoute. This doc captures the design and some implementation details of allowing the targetRef to reference a Gateway API's Gateway.

-

Having in place this HTTPRoute - Gateway hierarchy, we are also considering to apply Policy Attachment's defaults/overrides approach to the RateLimitPolicy CRD. But for now, it will only be about targeting the Gateway resource.

-

-

On designing Kuadrant's rate limiting and considering Istio/Envoy's rate limiting offering, we hit two limitations (described here). Therefore, not giving up entirely in existing Envoy's RateLimit Filter, we decided to move on and leverage the Envoy's Wasm Network Filter and implement rate limiting wasm-shim module compliant with the Envoy's Rate Limit Service (RLS). This wasm-shim module accepts a PluginConfig struct object as input configuration object.

-

Use Cases targeting a gateway

-

A key use case is being able to provide governance over what service providers can and cannot do when exposing a service via a shared ingress gateway. As well as providing certainty that no service is exposed without my ability as a cluster administrator to protect my infrastructure from unplanned load from badly behaving clients etc.

-

Goals

-

The goal of this document is to define:

-
    -
  • The schema of this PluginConfig struct.
  • -
  • The kuadrant-operator behavior filling the PluginConfig struct having as input the RateLimitPolicy k8s objects
  • -
  • The behavior of the wasm-shim having the PluginConfig struct as input.
  • -
-

Envoy's Rate Limit Service Protocol

-

Kuadrant's rate limit relies on the Rate Limit Service (RLS) -protocol, hence the gateway generates, based on a set of -actions, -a set of descriptors -(one descriptor is a set of descriptor entries). Those descriptors are send to the external rate limit service provider. -When multiple descriptors are provided, the external service provider will limit on ALL of them and -return an OVER_LIMIT response if any of them are over limit.

-

Schema (CRD) of the RateLimitPolicy

-
---
-apiVersion: kuadrant.io/v1
-kind: RateLimitPolicy
-metadata:
-  name: my-rate-limit-policy
-spec:
-  # Reference to an existing networking resource to attach the policy to. REQUIRED.
-  # It can be a Gateway API HTTPRoute or Gateway resource.
-  # It can only refer to objects in the same namespace as the RateLimitPolicy.
-  targetRef:
-    group: gateway.networking.k8s.io
-    kind: HTTPRoute / Gateway
-    name: myroute / mygateway
-
-  # The limits definitions to apply to the network traffic routed through the targeted resource.
-  # Equivalent to if otherwise declared within `defaults`.
-  limits:
-    "my_limit":
-      # The rate limits associated with this limit definition. REQUIRED.
-      # E.g., to specify a 50rps rate limit, add `{ limit: 50, duration: 1, unit: secod }`
-      rates: []
-
-      # Counter qualifiers.
-      # Each dynamic value in the data plane starts a separate counter, combined with each rate limit.
-      # E.g., to define a separate rate limit for each user name detected by the auth layer, add `metadata.filter_metadata.envoy\.filters\.http\.ext_authz.username`.
-      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.
-      counters: []
-
-      # Additional dynamic conditions to trigger the limit.
-      # Use it for filtering attributes not supported by HTTPRouteRule or with RateLimitPolicies that target a Gateway.
-      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.
-      when: []
-
-    # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on
-    # routes that lack a more specific policy attached to.
-    # Mutually exclusive with `overrides` and with declaring `limits` at the top-level of the spec.
-    defaults:
-      limits: {}
-
-    # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,
-    # thus also overriding any more specific policy occasionally attached to any of those routes.
-    # Mutually exclusive with `defaults` and with declaring `limits` at the top-level of the spec.
-    overrides:
-      limits: {}
-
-

.spec.rateLimits holds a list of rate limit configurations represented by the object RateLimit. -Each RateLimit object represents a complete rate limit configuration. It contains three fields:

-
    -
  • -

    rules (optional): Rules allow matching hosts and/or methods and/or paths. -Matching occurs when at least one rule applies against the incoming request. -If rules are not set, it is equivalent to matching all the requests.

    -
  • -
  • -

    configurations (required): Specifies a set of rate limit configurations that could be applied. -The rate limit configuration object is the equivalent of the -config.route.v3.RateLimit envoy object. -One configuration is, in turn, a list of rate limit actions. -Each action populates a descriptor entry. A vector of descriptor entries compose a descriptor. -Each configuration produces, at most, one descriptor. -Depending on the incoming request, one configuration may or may not produce a rate limit descriptor. -These rate limiting configuration rules provide flexibility to produce multiple descriptors. -For example, you may want to define one generic rate limit descriptor and another descriptor -depending on some header. -If the header does not exist, the second descriptor is not generated, but traffic keeps being rate -limited based on the generic descriptor.

    -
  • -
-
configurations:
-
-  - actions:
-    - request_headers:
-        header_name: "X-MY-CUSTOM-HEADER"
-        descriptor_key: "custom-header"
-        skip_if_absent: true
-  - actions:
-    - generic_key:
-        descriptor_key: admin
-        descriptor_value: "1"
-
-
    -
  • limits (optional): configuration of the rate limiting service (Limitador). -Check out limitador documentation for more information about the fields of each Limit object.
  • -
-

Note: No namespace/domain defined. Kuadrant operator will figure out.

-

Note: There is no PREAUTH, POSTAUTH stage defined. Ratelimiting filter should be placed after authorization filter to enable authenticated rate limiting. In the future, stage can be implemented.

-

Kuadrant-operator's behavior

-

One HTTPRoute can only be targeted by one rate limit policy.

-

Similarly, one Gateway can only be targeted by one rate limit policy.

-

However, indirectly, one gateway will be affected by multiple rate limit policies. -It is by design of the Gateway API, one gateway can be referenced by multiple HTTPRoute objects. -Furthermore, one HTTPRoute can reference multiple gateways.

-

The kuadrant operator will aggregate all the rate -limit policies that apply for each gateway, including RLP targeting HTTPRoutes and Gateways.

-

"VirtualHosting" RateLimitPolicies

-

Rate limit policies are scoped by the domains defined at the referenced HTTPRoute's -hostnames -and Gateway's Listener's Hostname.

-

Multiple HTTPRoutes with the same hostname

-

When there are multiple HTTPRoutes with the same hostname, HTTPRoutes are all admitted and -envoy merge the routing configuration in the same virtualhost. In these cases, the control plane -has to "merge" the rate limit configuration into a single entry for the wasm filter.

-

Overlapping HTTPRoutes

-

If some RLP targets a route for *.com and other RLP targets another route for api.com, -the control plane does not do any merging. -A request coming for api.com will be rate limited with the rules from the RLP targeting -the route api.com. -Also, a request coming for other.com will be rate limited with the rules from the RLP targeting -the route *.com.

-

examples

-

RLP A -> HTTPRoute A (api.toystore.com) -> Gateway G (*.com)

-

RLP B -> HTTPRoute B (other.toystore.com) -> Gateway G (*.com)

-

RLP H -> HTTPRoute H (*.toystore.com) -> Gateway G (*.com)

-

RLP G -> Gateway G (*.com)

-

Request 1 (api.toystore.com) -> apply RLP A and RLP G

-

Request 2 (other.toystore.com) -> apply RLP B and RLP G

-

Request 3 (unknown.toystore.com) -> apply RLP H and RLP G

-

Request 4 (other.com) -> apply RLP G

-

rate limit domain / limitador namespace

-

The kuadrant operator will add domain attribute of the Envoy's Rate Limit Service (RLS). It will also add the namespace attribute of the Limitador's rate limit config. The operator will ensure that the associated actions and rate limits have a common domain/namespace.

-

The value of this domain/namespace seems to be related to the virtualhost for which rate limit applies.

-

Schema of the WASM filter configuration object: the PluginConfig

-

Currently the PluginConfig looks like this:

-
#  The filter’s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.
-failure_mode_deny: true
-ratelimitpolicies:
-  default/toystore: # rate limit policy {NAMESPACE/NAME}
-    hosts: # HTTPRoute hostnames
-
-      - '*.toystore.com'
-    rules: # route level actions
-      - operations:
-          - paths:
-              - /admin/toy
-            methods:
-              - POST
-              - DELETE
-        actions:
-          - generic_key:
-              descriptor_value: yes
-              descriptor_key: admin
-    global_actions: # virtualHost level actions
-      - generic_key:
-          descriptor_value: yes
-          descriptor_key: vhaction
-    upstream_cluster: rate-limit-cluster # Limitador address reference
-    domain: toystore-app # RLS protocol domain value
-
-

Proposed new design for the WASM filter configuration object (PluginConfig struct):

-
#  The filter’s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.
-failure_mode_deny: true
-rate_limit_policies:
-
-  - name: toystore
-    rate_limit_domain: toystore-app
-    upstream_cluster: rate-limit-cluster
-    hostnames: ["*.toystore.com"]
-    gateway_actions:
-      - rules:
-          - paths: ["/admin/toy"]
-            methods: ["GET"]
-            hosts: ["pets.toystore.com"]
-        configurations:
-          - actions:
-            - generic_key:
-                descriptor_key: admin
-                descriptor_value: "1"
-
-

Update highlights:

-
    -
  • [minor] rate_limit_policies is a list instead of a map indexed by the name/namespace.
  • -
  • [major] no distinction between "rules" and global actions
  • -
  • [major] more aligned with RLS: multiple descriptors structured by "rate limit configurations" with matching rules
  • -
-

WASM-SHIM

-

WASM filter rate limit policies are not exactly the same as user managed RateLimitPolicy -custom resources. The WASM filter rate limit policies is part of the internal configuration -and therefore not exposed to the end user.

-

At the WASM filter level, there are no route level or gateway level rate limit policies. -The rate limit policies in the wasm plugin configuration may not map 1:1 to -user managed RateLimitPolicy custom resources. WASM rate limit policies have an internal logical -name and a set of hostnames to activate it based on the incoming request’s host header.

-

The WASM filter builds a tree based data structure holding the rate limit policies. -The longest (sub)domain match is used to select the policy to be applied. -Only one policy is being applied per invocation.

-

rate limit configurations

-

The WASM filter configuration object contains a list of rate limit configurations -to build a list of Envoy's RLS descriptors. These configurations are defined at

-
rate_limit_policies[*].gateway_actions[*].configurations
-
-

For example:

-
configurations:
-
-- actions:
-   - generic_key:
-        descriptor_key: admin
-        descriptor_value: "1"
-
-

How to read the policy:

-
    -
  • -

    Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor.

    -
  • -
  • -

    Each policy configuration has associated, optionally, a set of rules to match. Rules allow matching hosts and/or methods and/or paths. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.

    -
  • -
  • -

    Each configuration object defines a list of actions. Each action may (or may not) produce a descriptor entry (descriptor list item). If an action cannot append a descriptor entry, no descriptor is generated for the configuration.

    -
  • -
-

Note: The external rate limit service will be called when the gateway_actions object produces at least one not empty descriptor.

-

example

-

WASM filter rate limit policy for *.toystore.com. I want some rate limit descriptors configuration -only for api.toystore.com and another set of descriptors for admin.toystore.com. -The wasm filter config would look like this:

-
failure_mode_deny: true
-rate_limit_policies:
-
-  - name: toystore
-    rate_limit_domain: toystore-app
-    upstream_cluster: rate-limit-cluster
-    hostnames: ["*.toystore.com"]
-    gateway_actions:
-      - configurations:  # no rules. Applies to all *.toystore.com traffic
-          - actions:
-              - generic_key:
-                  descriptor_key: toystore-app
-                  descriptor_value: "1"
-      - rules:
-          - hosts: ["api.toystore.com"]
-        configurations:
-          - actions:
-              - generic_key:
-                  descriptor_key: api
-                  descriptor_value: "1"
-      - rules:
-          - hosts: ["admin.toystore.com"]
-        configurations:
-          - actions:
-              - generic_key:
-                  descriptor_key: admin
-                  descriptor_value: "1"
-
-
    -
  • When a request for api.toystore.com hits the filter, the descriptors generated would be:
  • -
-

descriptor 1 -

("toystore-app", "1")
-
-descriptor 2 -
("api", "1")
-

-
    -
  • When a request for admin.toystore.com hits the filter, the descriptors generated would be:
  • -
-

descriptor 1 -

("toystore-app", "1")
-
-descriptor 2 -
("admin", "1")
-

-
    -
  • When a request for other.toystore.com hits the filter, the descriptors generated would be: -descriptor 1 -
    ("toystore-app", "1")
    -
  • -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/index.html b/dev/kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/index.html deleted file mode 100644 index 9170ce0f..00000000 --- a/dev/kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/index.html +++ /dev/null @@ -1,1371 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Dns excluding specific addresses - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Dns excluding specific addresses

- -

Excluding specific addresses from being published

-

By default DNSPolicy takes all the addresses published in the status of the Gateway it is targeting and use these values in the DNSRecord it publishes to chosen DNS provider.

-

There could be cases where you have an address assigned to a gateway that you do not want to publish to a DNS provider, but you still want DNSPolicy to publish records for other addresses.

-

To prevent a gateway address being published to the DNS provider, you can set the excludeAddresses field in the DNSPolicy resource targeting the gateway. The excludeAddresses field can be set to a hostname, an IPAddress or a CIDR.

-

Below is an example of a DNSPolicy excluding a hostname:

-
apiVersion: kuadrant.io/v1
-kind: DNSPolicy
-metadata:
-  name: prod-web
-  namespace: ${DNSPOLICY_NAMESPACE}
-spec:
-  targetRef:
-    name: prod-web-istio
-    group: gateway.networking.k8s.io
-    kind: Gateway
-  providerRefs:
-
-    - name: aws-credentials
-  loadBalancing:
-    weight: 120
-    geo: EU
-    defaultGeo: true
-  excludeAddresses:
-    - "some.local.domain"
-
-

In the above case some.local.domain will not be set up as a CNAME record in the DNS provider.

-

Note: It is valid to exclude all addresses. However this will result in existing records being removed and no new ones being created.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/user-guides/dns/orphan-dns-records/index.html b/dev/kuadrant-operator/doc/user-guides/dns/orphan-dns-records/index.html deleted file mode 100644 index 899e449b..00000000 --- a/dev/kuadrant-operator/doc/user-guides/dns/orphan-dns-records/index.html +++ /dev/null @@ -1,1449 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Orphan dns records - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Orphan dns records

- -

Orphan DNS Records

-

This document is focused around multi-cluster DNS where you have more than one instance of a gateway that shares a common hostname with other gateways and assumes you have the observability stack set up.

-

What is an orphan record?

-

An orphan DNS record is a record or set of records that are owned by an instance of the DNS operator that no longer has a representation of those records on its cluster.

-

How do orphan records occur?

-

Orphan records can occur when a DNSRecord resource (a resource that is created in response to a DNSPolicy) is deleted without allowing the owning controller time to clean up the associated records in the DNS provider. Generally in order for this to happen, you would need to force remove a finalizer from the DNSRecord resource, delete the kuadrant-system namespace directly or un-install kuadrant (delete the subscription if using OLM) without first cleaning up existing policies or delete a cluster entirely without first cleaning up the associated DNSPolicies. These are not common scenarios but when they do occur they can leave behind records in your DNS Provider which may point to IPs / Hosts that are no longer valid.

-

How do you spot an orphan record(s) exist?

-

There is a prometheus based alert that uses some metrics exposed from the DNS components to spot this situation. If you have installed the alerts for Kuadrant under the examples folder, you will see in the alerts tab an alert called PossibleOrphanedDNSRecords. When this is firing it means there are likely to be orphaned records in your provider.

-

How do you get rid of an orphan record?

-

To remove an Orphan Record we must first identify the owner that is no longer aware of the record. To do this we need an existing DNSRecord in another cluster.

-

Example: You have 2 clusters that each have a gateway and share a host apps.example.com and have setup a DNSPolicy for each gateway. On cluster 1 you remove the kuadrant-system namespace without first cleaning up existing DNSPolicies targeting the gateway in your ingress-gateway namespace. Now there are a set of records that were being managed for that gateway that have not been removed. -On cluster 2 the DNS Operator managing the existing DNSRecord in that cluster has a record of all owners of that dns name. -In prometheus alerts, it spots that the number of owners does not correlate to the number of DNSRecord resources and triggers an alert. -To remedy this rather than going to the DNS provider directly and trying to figure out which records to remove, you can instead follow the steps below.

-

Get the owner id of the DNSRecord on cluster 2 for the shared host

-
kubectl get dnsrecord somerecord -n my-gateway-ns -o=jsonpath='{.status.ownerID}'
-
-

Get all the owner ids

-
kubectl get dnsrecord.kuadrant.io somerecord -n my-gateway-ns -o=jsonpath='{.status.domainOwners}'
-
-# output
-# ["26aacm1z","49qn0wp7"]
-
-

Create a placeholder DNSRecord with none active ownerID

-

For each owner id returned that isn't the owner id of the record that we want to remove records for, we need to create a dnsrecord resource and delete it. This will trigger the running operator in this cluster to clean up those records.

-

This is one of the owner id not in the existing dnsrecord on cluster -

export ownerID=26aacm1z  
-
-export rootHost=$(kubectl get dnsrecord.kuadrant.io somerecord -n  my-gateway-ns -o=jsonpath='{.spec.rootHost}')
-

-

Export a namespace with the aws credentials in it -

export targetNS=kuadrant-system 
-
-kubectl apply -f - <<EOF
-apiVersion: kuadrant.io/v1alpha1
-kind: DNSRecord
-metadata:
-  name: delete-old-loadbalanced-dnsrecord
-  namespace: ${targetNS}
-spec:
-  providerRef:
-    name: my-aws-credentials
-  ownerID: ${ownerID}
-  rootHost: ${rootHost}
-  endpoints:
-
-    - dnsName: ${rootHost}
-      recordTTL: 60
-      recordType: CNAME
-      targets:
-        - klb.doesnt-exist.${rootHost}
-EOF
-

-

Delete the DNSrecord

-
kubectl delete dnsrecord.kuadrant.io delete-old-loadbalanced-dnsrecord -n ${targetNS} 
-
-

Verification

-

We can verify that the steps worked correctly, by checking the DNSRecord again. Note it may take a several minutes for the other record to update. We can force it by adding a label to the record

-
kubectl label dnsrecord.kuadrant.io somerecord test=test -n ${targetNS}
-
-kubectl get dnsrecord.kuadrant.io somerecord -n my-gateway-ns -o=jsonpath='{.status.domainOwners}'
-
-

You should also see your alert eventually stop triggering.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/doc/user-guides/misc/external-api/index.html b/dev/kuadrant-operator/doc/user-guides/misc/external-api/index.html deleted file mode 100644 index 6d129188..00000000 --- a/dev/kuadrant-operator/doc/user-guides/misc/external-api/index.html +++ /dev/null @@ -1,1498 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Using Gateway API and Kuadrant with APIs outside of the cluster - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Using Gateway API and Kuadrant with APIs outside of the cluster

-

Overview

-

In some cases, the application and API endpoints are exposed in a host external to the cluster where you are a running Gateway API and Kuadrant but you do not want it accessible directly via the public internet. If you want to have external traffic come into a Gateway API defined Gateway and protected by Kuadrant policies first being proxied to the existing legacy endpoints, this guide will give you some example of how to achieve this.

-

What we will do

-
    -
  • Have an API in a private location become accessible via a public hostname
  • -
  • Setup a gateway and HTTPRoute to expose this private API via our new Gateway on a (public) domain.
  • -
  • proxy valid requests through to our back-end API service
  • -
  • Add auth and rate limiting and TLS to our public Gateway to protect it
  • -
-

Pre Requisites

-
    -
  • Kuadrant and Gateway API installed (with Istio as the gateway provider)
  • -
  • Existing API on separate cluster accessible via HTTP from the Gateway cluster
  • -
-

What we want to achieve:

-
                                ------------------- DMZ -----------------|
-                                                                         |
-                               |-------------------------------- internal network -----------------------------------| 
-                    load balancer                                        |                                            |           
-                        | - |  |      |----------k8s cluster-----------| |   |----- Legacy API Location --------|     |
-                        |   |  |      |  Gateway  Kuadrant             | |   |                                  |     |       
-                        |   |  |      |   -----    -----               | |   |                                  |     |                     
----public traffic--my.api.com-------->|   |    |<--|   |               | |   |  HTTP (my.api.local)   Backend   |     |
-                        |   |  |      |   |    |   -----               | |   |      -----             -----     |     | 
-                        |   |  |      |   ----- -----------proxy---(my.api.local)-->|   | ----------> |   |     |     | 
-                        |   |  |      |                                | |   |      -----             -----     |     | 
-                        | - |  |      |--------------------------------| |   |----------------------------------|     | 
-                               |                                         |                                            |   
-                               |-----------------------------------------|--------------------------------------------| 
-                                                                         |
-                                ------------------- DMZ -----------------|       
-
-

Note for all of the resources defined here there is a copy of them under the examples folder

-

1) Deploy a Gateway into the K8s cluster that will act as the main Ingress Gateway

-

Define your external API hostname and Internal API hostname

-
export EXTERNAL_HOST=my.api.com
-export INTERNAL_HOST=my.api.local
-
-
kubectl apply -n gateway-system -f - <<EOF
-apiVersion: gateway.networking.k8s.io/v1
-kind: Gateway
-metadata:
-  labels:
-    istio: ingress
-  name: ingress
-spec:
-  gatewayClassName: istio
-  listeners:
-
-    - name: ingress-tls
-      port: 443
-      hostname: '${EXTERNAL_HOST}'
-      protocol: HTTPS
-      allowedRoutes:
-        namespaces:
-          from: All
-      tls:
-        mode: Terminate
-        certificateRefs:
-          - name: ingress-tls  #you can use TLSPolicy to provide this certificate or provide it manually
-            kind: Secret
-EOF            
-
-

2) Optional: Use TLSPolicy to configure TLS certificates for your listeners

-

TLSPolicy Guide

-

3) Optional: Use DNSPolicy to bring external traffic to the external hostname

-

DNSPolicy Guide

-

4) Ensure the Gateway has the status of Programmed set to True meaning it is ready.

-
kubectl get gateway ingress -n gateway-system -o=jsonpath='{.status.conditions[?(@.type=="Programmed")].status}'
-
-

5) Let Istio know about the external hostname and the rules it should use when sending traffic to that destination.

-

Create a ServiceEntry

-
kubectl apply -n gateway-system -f - <<EOF
-apiVersion: networking.istio.io/v1beta1
-kind: ServiceEntry
-metadata:
-  name: internal-api
-spec:
-  hosts:
-
-    - ${INTERNAL_HOST} # your internal http endpoint
-  location: MESH_EXTERNAL
-  resolution: DNS
-  ports:
-    - number: 80
-      name: http
-      protocol: HTTP
-    - number: 443
-      name: https
-      protocol: TLS
-EOF
-
-

Create a DestionationRule to configure how to handle traffic to this endpoint.

-
kubectl apply -n gateway-system -f - <<EOF
-apiVersion: networking.istio.io/v1
-kind: DestinationRule
-metadata:
-  name: internal-api
-spec:
-  host: ${INTERNAL_HOST}
-  trafficPolicy:
-    tls:
-      mode: SIMPLE
-      sni: ${INTERNAL_HOST}
-EOF
-
-

6) Create a HTTPRoute that will route traffic for the Gateway and re-write the host

-
kubectl apply -n gateway-system -f - <<EOF
-apiVersion: gateway.networking.k8s.io/v1
-kind: HTTPRoute
-metadata:
-  name: external-host
-spec:
-  parentRefs:
-
-    - name: ingress
-  hostnames:
-    - ${EXTERNAL_HOST}
-  rules:
-    - backendRefs:
-        - name: ${INTERNAL_HOST}
-          kind: Hostname
-          group: networking.istio.io
-          port: 443
-      filters:
-        - type: URLRewrite
-          urlRewrite:
-            hostname: ${INTERNAL_HOST}
-EOF
-
-

We should now be able to send requests to our external host and have the Gateway proxy requests and responses to and from the internal host.

-

7) (optional) Add Auth and RateLimiting to protect your public endpoint

-

As we are using Gateway API to define the Gateway and HTTPRoutes, we can now also apply RateLimiting and Auth to protect our public endpoints

-

AuthPolicy Guide

-

RateLimiting Guide

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/examples/alerts/index.html b/dev/kuadrant-operator/examples/alerts/index.html deleted file mode 100644 index dd988608..00000000 --- a/dev/kuadrant-operator/examples/alerts/index.html +++ /dev/null @@ -1,1462 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Index - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Index

- -

SLO Multi burn rate multi window alerts

-

Kuadrant have created two example SLO alerts to help give ideas on the types of SLO alerts that could be used with the operator. We have created one alert for latency and one for availability, both are Multiwindow, Multi-Burn-Rate Alerts. The alerts show a scenario where a 28d rolling window is used and a uptime of 99.95% i.e only 0.05% error budget margin is desired. This in real world time would be downtime of around:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Time FrameDuration
Daily:43s
Weekly:5m 2.4s
Monthly:21m 44s
Quarterly:1h 5m 12s
Yearly:4h 20m 49s
-

These values can be changed to suit different scenarios

-

Sloth

-

Sloth is a tool to aid in the creation of multi burn rate and multi window SLO alerts and was used to create both the availability and latency alerts. It follows the common standard set out by Google's SRE book. Sloth generates alerts based on specific specs given. The specs for our example alerts can be found in the example/sloth folder.

-

Metrics used for the alerts

-

Availability

-

For the availability SLO alerts the Istio metric istio_requests_total was used as its a counter type metric meaning the values can only increase as well as it gives information on all requests handled by the Istio proxy.

-

Latency

-

For the availability SLO alerts the Istio metric istio_request_duration_milliseconds was used as its a Histogram.

-

Sloth generation

-

You can modify the examples Sloth specs we have and regenerate the prometheus rules using the Sloth CLI and the generate command. For more information please the Sloth website

-

sloth generate -i examples/alerts/sloth/latency.yaml --default-slo-period=28d
-
-You can also use the make target to generate the rules to.

-
make sloth-generate
-
-

Prometheus unit tests

-

There are also two matching unit tests to verify and test the alerts that Sloth has generated. These can be run using the make target:

-
make test-alerts
-
-

Note: The prometheus unit tests will also run via Github actions when a change is made in the alerts file.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrant-operator/examples/alerts/kustomization.yaml b/dev/kuadrant-operator/examples/alerts/kustomization.yaml deleted file mode 100644 index 66b05c53..00000000 --- a/dev/kuadrant-operator/examples/alerts/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - prometheusrules_policies_missing.yaml - - slo-availability.yaml - - slo-latency.yaml - - orphan_records.yaml diff --git a/dev/kuadrant-operator/examples/alerts/orphan_records.yaml b/dev/kuadrant-operator/examples/alerts/orphan_records.yaml deleted file mode 100644 index 1f3e5f94..00000000 --- a/dev/kuadrant-operator/examples/alerts/orphan_records.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: dns-records-rules - namespace: monitoring -spec: - groups: - - name: dns_records - rules: - - alert: PossibleOrphanedDNSRecords - expr: | - sum by(rootDomain) ( - count by(rootDomain) (kuadrant_dnsrecord_status_root_domain_owners) / - count by(rootDomain) (kuadrant_dnsrecord_status) - - count by(rootDomain) (kuadrant_dnsrecord_status) - ) > 0 - for: 5m - labels: - severity: warning - annotations: - summary: "The number of DNS Owners is greater than the number of records for root domain '{{ $labels.rootDomain }}'" - description: "This alert fires if the number of owners (controller collaborating on a record set) is greater than the number of records. 
This may mean a record has been left behind in the provider due to a failed delete" diff --git a/dev/kuadrant-operator/examples/alerts/prometheusrules_policies_missing.yaml b/dev/kuadrant-operator/examples/alerts/prometheusrules_policies_missing.yaml deleted file mode 100644 index 3622b796..00000000 --- a/dev/kuadrant-operator/examples/alerts/prometheusrules_policies_missing.yaml +++ /dev/null @@ -1,57 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: policies-missing - namespace: monitoring -spec: - groups: - - name: policy-rules - rules: - - alert: GatewayWithoutDnsPolicy - expr: | - label_replace(gatewayapi_gateway_info, "gateway_name", "$1", "name", "(.*)") unless - (label_replace(gatewayapi_gateway_info, "gateway_name", "$1", "name", "(.*)") - * on(gateway_name) group_left - label_replace(gatewayapi_dnspolicy_target_info{target_kind="Gateway"}, "gateway_name", "$1", "target_name", "(.*)")) - for: 5m - labels: - severity: warning - annotations: - summary: "No DNSPolicy targeting Gateway '{{ $labels.gateway_name }}'" - description: "This alert fires if a gateway does not have an associated DNSPolicy." - - alert: GatewayWithoutTlsPolicy - expr: | - label_replace(gatewayapi_gateway_info, "gateway_name", "$1", "name", "(.*)") unless - (label_replace(gatewayapi_gateway_info, "gateway_name", "$1", "name", "(.*)") - * on(gateway_name) group_left - label_replace(gatewayapi_tlspolicy_target_info{target_kind="Gateway"}, "gateway_name", "$1", "target_name", "(.*)")) - for: 5m - labels: - severity: warning - annotations: - summary: "No TLSPolicy targeting Gateway '{{ $labels.gateway_name }}'" - description: "This alert fires if a gateway does not have an associated TLSPolicy." 
- - alert: HTTPRouteWithoutAuthPolicy - expr: | - label_replace(gatewayapi_httproute_created, "httproute_name", "$1", "name", "(.*)") unless - (label_replace(gatewayapi_httproute_created, "httproute_name", "$1", "name", "(.*)") - * on(httproute_name) group_left - label_replace(gatewayapi_authpolicy_target_info{target_kind="HTTPRoute"}, "httproute_name", "$1", "target_name", "(.*)")) - for: 5m - labels: - severity: warning - annotations: - summary: "No AuthPolicy targeting HTTPRoute '{{ $labels.httproute_name }}'" - description: "This alert fires if a HTTPRoute does not have an associated AuthPolicy." - - alert: HTTPRouteWithoutRateLimitPolicy - expr: | - label_replace(gatewayapi_httproute_created, "httproute_name", "$1", "name", "(.*)") unless - (label_replace(gatewayapi_httproute_created, "httproute_name", "$1", "name", "(.*)") - * on(httproute_name) group_left - label_replace(gatewayapi_ratelimitpolicy_target_info{target_kind="HTTPRoute"}, "httproute_name", "$1", "target_name", "(.*)")) - for: 5m - labels: - severity: warning - annotations: - summary: "No RateLimitPolicy targeting HTTPRoute '{{ $labels.httproute_name }}'" - description: "This alert fires if a HTTPRoute does not have an associated RateLimitPolicy." diff --git a/dev/kuadrant-operator/examples/alerts/slo-availability.yaml b/dev/kuadrant-operator/examples/alerts/slo-availability.yaml deleted file mode 100644 index 3263bc61..00000000 --- a/dev/kuadrant-operator/examples/alerts/slo-availability.yaml +++ /dev/null @@ -1,212 +0,0 @@ - ---- -# Code generated by Sloth (v0.11.0): https://github.com/slok/sloth. -# DO NOT EDIT. 
- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - creationTimestamp: null - labels: - app.kubernetes.io/component: SLO - app.kubernetes.io/managed-by: sloth - name: availability-slo - namespace: monitoring -spec: - groups: - - name: sloth-slo-sli-recordings-kuadrant-requests-availability - rules: - - expr: | - (sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[5m])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[5m]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 5m - record: slo:sli_error:ratio_rate5m - - expr: | - (sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[30m])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[30m]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 30m - record: slo:sli_error:ratio_rate30m - - expr: | - (sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[1h])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[1h]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 1h - record: slo:sli_error:ratio_rate1h - - expr: | - (sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[2h])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[2h]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 2h - record: slo:sli_error:ratio_rate2h - - expr: | - 
(sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[6h])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[6h]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 6h - record: slo:sli_error:ratio_rate6h - - expr: | - (sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[1d])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[1d]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 1d - record: slo:sli_error:ratio_rate1d - - expr: | - (sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[3d])) by (request_host)) - / - (sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[3d]) )by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 3d - record: slo:sli_error:ratio_rate3d - - expr: | - sum_over_time(slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"}[4w]) - / ignoring (sloth_window) - count_over_time(slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"}[4w]) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_window: 4w - record: slo:sli_error:ratio_rate4w - - name: sloth-slo-meta-recordings-kuadrant-requests-availability - rules: - - expr: vector(0.9995) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - record: slo:objective:ratio 
- - expr: vector(1-0.9995) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - record: slo:error_budget:ratio - - expr: vector(28) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - record: slo:time_period:days - - expr: | - slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} - / on(sloth_id, sloth_slo, sloth_service) group_left - slo:error_budget:ratio{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - record: slo:current_burn_rate:ratio - - expr: | - slo:sli_error:ratio_rate4w{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} - / on(sloth_id, sloth_slo, sloth_service) group_left - slo:error_budget:ratio{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - record: slo:period_burn_rate:ratio - - expr: 1 - slo:period_burn_rate:ratio{sloth_id="kuadrant-requests-availability", - sloth_service="kuadrant", sloth_slo="requests-availability"} - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_slo: requests-availability - record: slo:period_error_budget_remaining:ratio - - expr: vector(1) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-availability - sloth_mode: cli-gen-k8s - sloth_objective: "99.95" - sloth_service: kuadrant - sloth_slo: requests-availability - sloth_spec: sloth.slok.dev/v1 - sloth_version: v0.11.0 - record: 
sloth_slo_info - - name: sloth-slo-alerts-kuadrant-requests-availability - rules: - - alert: KuadrantAvailabilityHighErrorRate - annotations: - summary: High error rate on HTTPRoute requests responses - title: (page) {{$labels.sloth_service}} {{$labels.sloth_slo}} SLO error budget - burn rate is too fast. - expr: | - ( - max(slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (13.44 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate1h{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (13.44 * 0.0004999999999999716)) without (sloth_window) - ) - or - ( - max(slo:sli_error:ratio_rate30m{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (5.6000000000000005 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate6h{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (5.6000000000000005 * 0.0004999999999999716)) without (sloth_window) - ) - labels: - category: availability - severity: critical - sloth_severity: page - - alert: KuadrantAvailabilityHighErrorRate - annotations: - summary: High error rate on HTTPRoute requests responses - title: (ticket) {{$labels.sloth_service}} {{$labels.sloth_slo}} SLO error - budget burn rate is too fast. 
- expr: | - ( - max(slo:sli_error:ratio_rate2h{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (2.8000000000000003 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate1d{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (2.8000000000000003 * 0.0004999999999999716)) without (sloth_window) - ) - or - ( - max(slo:sli_error:ratio_rate6h{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (0.9333333333333333 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate3d{sloth_id="kuadrant-requests-availability", sloth_service="kuadrant", sloth_slo="requests-availability"} > (0.9333333333333333 * 0.0004999999999999716)) without (sloth_window) - ) - labels: - category: availability - severity: warning - sloth_severity: ticket diff --git a/dev/kuadrant-operator/examples/alerts/slo-latency.yaml b/dev/kuadrant-operator/examples/alerts/slo-latency.yaml deleted file mode 100644 index 63e61c2c..00000000 --- a/dev/kuadrant-operator/examples/alerts/slo-latency.yaml +++ /dev/null @@ -1,212 +0,0 @@ - ---- -# Code generated by Sloth (v0.11.0): https://github.com/slok/sloth. -# DO NOT EDIT. 
- -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - creationTimestamp: null - labels: - app.kubernetes.io/component: SLO - app.kubernetes.io/managed-by: sloth - name: latency-slo - namespace: monitoring -spec: - groups: - - name: sloth-slo-sli-recordings-kuadrant-requests-latency - rules: - - expr: | - (( sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[5m]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[5m]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[5m]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 5m - record: slo:sli_error:ratio_rate5m - - expr: | - (( sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[30m]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[30m]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[30m]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 30m - record: slo:sli_error:ratio_rate30m - - expr: | - (( sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[1h]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[1h]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[1h]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - 
sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 1h - record: slo:sli_error:ratio_rate1h - - expr: | - (( sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[2h]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[2h]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[2h]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 2h - record: slo:sli_error:ratio_rate2h - - expr: | - (( sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[6h]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[6h]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[6h]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 6h - record: slo:sli_error:ratio_rate6h - - expr: | - (( sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[1d]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[1d]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[1d]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 1d - record: slo:sli_error:ratio_rate1d - - expr: | - (( 
sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[3d]))by (request_host) - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[3d]) )by (request_host) )) - / - (sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[3d]))by (request_host)) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 3d - record: slo:sli_error:ratio_rate3d - - expr: | - sum_over_time(slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"}[4w]) - / ignoring (sloth_window) - count_over_time(slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"}[4w]) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_window: 4w - record: slo:sli_error:ratio_rate4w - - name: sloth-slo-meta-recordings-kuadrant-requests-latency - rules: - - expr: vector(0.9995) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - record: slo:objective:ratio - - expr: vector(1-0.9995) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - record: slo:error_budget:ratio - - expr: vector(28) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - record: slo:time_period:days - - expr: | - slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} - / on(sloth_id, sloth_slo, sloth_service) group_left - slo:error_budget:ratio{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", 
sloth_slo="requests-latency"} - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - record: slo:current_burn_rate:ratio - - expr: | - slo:sli_error:ratio_rate4w{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} - / on(sloth_id, sloth_slo, sloth_service) group_left - slo:error_budget:ratio{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - record: slo:period_burn_rate:ratio - - expr: 1 - slo:period_burn_rate:ratio{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", - sloth_slo="requests-latency"} - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_slo: requests-latency - record: slo:period_error_budget_remaining:ratio - - expr: vector(1) - labels: - owner: kuadrant-org - sloth_id: kuadrant-requests-latency - sloth_mode: cli-gen-k8s - sloth_objective: "99.95" - sloth_service: kuadrant - sloth_slo: requests-latency - sloth_spec: sloth.slok.dev/v1 - sloth_version: v0.11.0 - record: sloth_slo_info - - name: sloth-slo-alerts-kuadrant-requests-latency - rules: - - alert: KuadrantlatencyHighErrorRate - annotations: - summary: High latency on HTTPRoute requests responses - title: (page) {{$labels.sloth_service}} {{$labels.sloth_slo}} SLO error budget - burn rate is too fast. 
- expr: | - ( - max(slo:sli_error:ratio_rate5m{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (13.44 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate1h{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (13.44 * 0.0004999999999999716)) without (sloth_window) - ) - or - ( - max(slo:sli_error:ratio_rate30m{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (5.6000000000000005 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate6h{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (5.6000000000000005 * 0.0004999999999999716)) without (sloth_window) - ) - labels: - category: latency - severity: critical - sloth_severity: page - - alert: KuadrantlatencyHighErrorRate - annotations: - summary: High latency on HTTPRoute requests responses - title: (ticket) {{$labels.sloth_service}} {{$labels.sloth_slo}} SLO error - budget burn rate is too fast. 
- expr: | - ( - max(slo:sli_error:ratio_rate2h{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (2.8000000000000003 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate1d{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (2.8000000000000003 * 0.0004999999999999716)) without (sloth_window) - ) - or - ( - max(slo:sli_error:ratio_rate6h{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (0.9333333333333333 * 0.0004999999999999716)) without (sloth_window) - and - max(slo:sli_error:ratio_rate3d{sloth_id="kuadrant-requests-latency", sloth_service="kuadrant", sloth_slo="requests-latency"} > (0.9333333333333333 * 0.0004999999999999716)) without (sloth_window) - ) - labels: - category: latency - severity: warning - sloth_severity: ticket diff --git a/dev/kuadrant-operator/examples/alerts/sloth/slo-availability.yaml b/dev/kuadrant-operator/examples/alerts/sloth/slo-availability.yaml deleted file mode 100644 index 65768e26..00000000 --- a/dev/kuadrant-operator/examples/alerts/sloth/slo-availability.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: sloth.slok.dev/v1 -kind: PrometheusServiceLevel -metadata: - name: availability-slo - namespace: monitoring -spec: - service: "kuadrant" - labels: - owner: "kuadrant-org" - slos: - - name: "requests-availability" - objective: 99.95 - description: "Multi window multi burn rate SLO based on availability for HTTP request responses." - sli: - events: - errorQuery: sum(rate(istio_requests_total{job="ingress-metrics-proxy",response_code=~"5.*"}[{{.window}}])) by (request_host) - totalQuery: sum(rate(istio_requests_total{job="ingress-metrics-proxy"}[{{.window}}]) )by (request_host) - alerting: - name: KuadrantAvailabilityHighErrorRate - labels: - category: "availability" - annotations: - # Overwrite default Sloth SLO alert summmary on ticket and page alerts. 
- summary: "High error rate on HTTPRoute requests responses" - pageAlert: - labels: - severity: critical - ticketAlert: - labels: - severity: warning - diff --git a/dev/kuadrant-operator/examples/alerts/sloth/slo-latency.yaml b/dev/kuadrant-operator/examples/alerts/sloth/slo-latency.yaml deleted file mode 100644 index 6f526dc8..00000000 --- a/dev/kuadrant-operator/examples/alerts/sloth/slo-latency.yaml +++ /dev/null @@ -1,34 +0,0 @@ -apiVersion: sloth.slok.dev/v1 -kind: PrometheusServiceLevel -metadata: - name: latency-slo - namespace: monitoring -spec: - service: "kuadrant" - labels: - owner: "kuadrant-org" - slos: - - name: "requests-latency" - objective: 99.95 - description: "Multi window multi burn rate SLO based on latency for HTTP request responses." - sli: - events: - errorQuery: ( - sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[{{.window}}]))by (request_host) - - - sum(rate(istio_request_duration_milliseconds_bucket{le="250", job="ingress-metrics-proxy", response_code="200" }[{{.window}}]) )by (request_host) - ) - totalQuery: sum(rate(istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"}[{{.window}}]))by (request_host) - alerting: - name: KuadrantlatencyHighErrorRate - labels: - category: "latency" - annotations: - # Overwrite default Sloth SLO alert summmary on ticket and page alerts. 
- summary: "High latency on HTTPRoute requests responses" - pageAlert: - labels: - severity: critical - ticketAlert: - labels: - severity: warning diff --git a/dev/kuadrant-operator/examples/alerts/tests/slo-availability-test.yaml b/dev/kuadrant-operator/examples/alerts/tests/slo-availability-test.yaml deleted file mode 100644 index 9b8a3352..00000000 --- a/dev/kuadrant-operator/examples/alerts/tests/slo-availability-test.yaml +++ /dev/null @@ -1,47 +0,0 @@ -rule_files: - - /prometheus/availability-rules.yaml - -evaluation_interval: 1m - -tests: - - interval: 1m - input_series: - # Promtool uses expanding notation as its way of creating time series (https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series) - # 0+0x30 = 0, 0, 0, ...0 For a total of 31 times THEN 0+10x30 = 0, 10 ,20, ... 300 For a total of 31 times - - series: istio_requests_total{job="ingress-metrics-proxy",response_code="500"} - values: "0+0x30 0+10x30" - # 0+1x30 = 0, 1, 2, ...30 For a total of 31 times THEN 31+100x30 = 31, 131 ,231, 331 ... 3031 For a total of 31 times - - series: istio_requests_total{job="ingress-metrics-proxy"} - values: "0+1x30 31+100x30" - alert_rule_test: - - eval_time: 30m - alertname: KuadrantAvailabilityHighErrorRate - exp_alerts: [] - - eval_time: 60m - alertname: KuadrantAvailabilityHighErrorRate - exp_alerts: - - exp_labels: - alertname: KuadrantAvailabilityHighErrorRate - category: availability - owner: kuadrant-org - severity: critical - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_severity: page - sloth_slo: requests-availability - exp_annotations: - summary: High error rate on HTTPRoute requests responses - title: (page) kuadrant requests-availability SLO error budget burn rate is too fast. 
- - exp_labels: - alertname: KuadrantAvailabilityHighErrorRate - category: availability - owner: kuadrant-org - severity: warning - sloth_id: kuadrant-requests-availability - sloth_service: kuadrant - sloth_severity: ticket - sloth_slo: requests-availability - exp_annotations: - summary: High error rate on HTTPRoute requests responses - title: (ticket) kuadrant requests-availability SLO error budget burn rate is too fast. - diff --git a/dev/kuadrant-operator/examples/alerts/tests/slo-latency-test.yaml b/dev/kuadrant-operator/examples/alerts/tests/slo-latency-test.yaml deleted file mode 100644 index de331c0d..00000000 --- a/dev/kuadrant-operator/examples/alerts/tests/slo-latency-test.yaml +++ /dev/null @@ -1,47 +0,0 @@ -rule_files: - - /prometheus/latency-rules.yaml - -evaluation_interval: 1m - -tests: - - interval: 1m - input_series: - # Promtool uses expanding notation as its way of creating time series (https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/#series) - # 0+1x30 = 0, 1, 2, ...30 For a total of 31 times THEN 31+10x30 = 31, 41 ,51, ... 331 For a total of 31 times - - series: istio_request_duration_milliseconds_bucket{le="250",job="ingress-metrics-proxy",response_code="200"} - values: "0+1x30 31+10x30" - # 0+1x30 = 0, 1, 2, ...30 For a total of 31 times THEN 31+100x30 = 31, 131 ,231, 331 ... 
3031 For a total of 31 times - - series: istio_request_duration_milliseconds_count{job="ingress-metrics-proxy",response_code="200"} - values: "0+1x30 31+100x30" - alert_rule_test: - - eval_time: 30m - alertname: KuadrantlatencyHighErrorRate - exp_alerts: [] - - eval_time: 60m - alertname: KuadrantlatencyHighErrorRate - exp_alerts: - - exp_labels: - alertname: KuadrantlatencyHighErrorRate - category: latency - owner: kuadrant-org - severity: critical - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_severity: page - sloth_slo: requests-latency - exp_annotations: - summary: High latency on HTTPRoute requests responses - title: (page) kuadrant requests-latency SLO error budget burn rate is too fast. - - exp_labels: - alertname: KuadrantlatencyHighErrorRate - category: latency - owner: kuadrant-org - severity: warning - sloth_id: kuadrant-requests-latency - sloth_service: kuadrant - sloth_severity: ticket - sloth_slo: requests-latency - exp_annotations: - summary: High latency on HTTPRoute requests responses - title: (ticket) kuadrant requests-latency SLO error budget burn rate is too fast. 
- diff --git a/dev/kuadrant-operator/examples/dashboards/app_developer.json b/dev/kuadrant-operator/examples/dashboards/app_developer.json deleted file mode 100644 index d8406cd6..00000000 --- a/dev/kuadrant-operator/examples/dashboards/app_developer.json +++ /dev/null @@ -1,1693 +0,0 @@ -{ - "__requires": [ - { - "type": "panel", - "id": "dashlist", - "name": "Dashboard list", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "9.5.3" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "table", - "name": "Table", - "version": "" - }, - { - "type": "panel", - "id": "text", - "name": "Text", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "description": "App Developer Dashboard", - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 7630, - "graphTooltip": 1, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "description": "", - "gridPos": { - "h": 7, - "w": 6, - "x": 0, - "y": 0 - }, - "id": 155, - "options": { - "maxItems": 10, - "query": "", - "showHeadings": false, - "showRecentlyViewed": false, - "showSearch": true, - "showStarred": false, - "tags": [ - "kuadrant" - ] - }, - "pluginVersion": "9.5.3", - "title": "Kuadrant Dashboards", - "type": "dashlist" - }, - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "description": "", - 
"gridPos": { - "h": 7, - "w": 18, - "x": 6, - "y": 0 - }, - "id": 153, - "options": { - "code": { - "language": "plaintext", - "showLineNumbers": false, - "showMiniMap": false - }, - "content": "#### Overview of API/HTTPRoute Metrics\n\nThe row of panels below is repeated for each HTTPRoute resource in your cluster.\nThey provide real-time & historical insights into the performance and health of each API.\nMetrics displayed include request rates, success/error breakdown, and latency percentiles, giving a snapshot of API efficiency and reliability.\nUse this dashboard to monitor traffic patterns, identify potential issues, and ensure optimal performance of your services.\n\n*Important: HTTPRoutes must include a \"service\" and \"deployment\" label with a value that matches the name of the service & deployment being routed to. eg. \"service=myapp, deployment=myapp\"*", - "mode": "markdown" - }, - "pluginVersion": "9.5.3", - "title": "App Developer Dashboard", - "type": "text" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 7 - }, - "id": 141, - "panels": [], - "repeat": "api", - "title": "\"$api\" API - Requests, latency and errors", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Hostname: \"Domain name for the API server.\" | API Namespace: \"Kubernetes namespace where the API is deployed.\" | Gateway name: \"Name of the Gateway API gateway the HTTPRoute is targeting\" | Gateway name: \"Name of the Gateway API gateway namespace\"\n", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 8 - }, - "id": 97, - "options": { - "cellHeight": "sm", - 
"footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_httproute_hostname_info{name=~\"$api\"}", - "format": "table", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_httproute_labels{name=~\"$api\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_httproute_parent_info{name=~\"$api\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "C" - } - ], - "transformations": [ - { - "id": "seriesToColumns", - "options": { - "byField": "name" - } - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time 1": true, - "Time 2": true, - "Value #A": true, - "Value #B": true, - "__name__ 1": true, - "__name__ 2": true, - "cluster_id 1": false, - "container 1": true, - "container 2": true, - "customresource_group 1": true, - "customresource_group 2": true, - "customresource_kind 1": true, - "customresource_kind 2": true, - "customresource_version 1": true, - "customresource_version 2": true, - "deployment": false, - "exported_namespace 2": true, - "instance 1": true, - "instance 2": true, - "job 1": true, - "job 2": true, - "name": true, - "namespace 1": false, - "namespace 2": true, - "namespace 3": false, - "parent_name": false, - "parent_namespace": false, - "prometheus 1": true, - "prometheus 2": true, - "service": false - }, - "indexByName": { - "Time 1": 1, - "Time 2": 13, - "Time 3": 33, - 
"Value #A": 12, - "Value #B": 24, - "Value #C": 51, - "__name__ 1": 2, - "__name__ 2": 14, - "__name__ 3": 34, - "cluster_id 1": 52, - "cluster_id 2": 28, - "cluster_id 3": 35, - "container 1": 3, - "container 2": 15, - "container 3": 36, - "customresource_group 1": 4, - "customresource_group 2": 16, - "customresource_group 3": 37, - "customresource_kind 1": 5, - "customresource_kind 2": 17, - "customresource_kind 3": 38, - "customresource_version 1": 6, - "customresource_version 2": 18, - "customresource_version 3": 39, - "deployment": 19, - "hostname": 7, - "instance 1": 8, - "instance 2": 20, - "instance 3": 40, - "job 1": 9, - "job 2": 21, - "job 3": 41, - "name": 0, - "namespace 1": 10, - "namespace 2": 22, - "namespace 3": 42, - "parent_group": 43, - "parent_kind": 44, - "parent_name": 45, - "parent_namespace": 46, - "prometheus 1": 11, - "prometheus 2": 23, - "prometheus 3": 47, - "receive 1": 25, - "receive 2": 29, - "receive 3": 48, - "replica 1": 26, - "replica 2": 30, - "replica 3": 49, - "service": 31, - "tenant_id 1": 27, - "tenant_id 2": 32, - "tenant_id 3": 50 - }, - "renameByName": { - "Time 2": "", - "Value #C": "", - "cluster_id 1": "Cluster ID", - "customresource_kind 2": "", - "deployment": "API Workload (Deployment)", - "exported_namespace 1": "API Namespace", - "exported_namespace 2": "", - "hostname": "Hostname", - "name": "API Name (HTTPRoute)", - "namespace 1": "Namespace", - "owner": "Owner", - "parent_name": "Gateway name", - "parent_namespace": "Gateway namespace", - "service": "Service" - } - } - }, - { - "id": "filterFieldsByName", - "options": { - "include": { - "names": [ - "Hostname", - "Gateway name", - "Gateway namespace", - "Cluster ID" - ] - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Requests per second, broken down by response code e.g. 
200, 302, 403, 500", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "A" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "light-yellow", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "light-green", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 9, - "x": 0, - "y": 11 - }, - "id": 156, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) 
(label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "total", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name, response_code) * on(destination_service_name) group_left(name) (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "{{response_code}}", - "range": true, - "refId": "B" - } - ], - "title": "request breakdown by code (req/s)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total requests per second", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 9, - "y": 11 - }, - "id": 137, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - 
"instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "total", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of responses.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "decimals": 0, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 11, - "y": 11 - }, - "id": 149, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(increase(istio_requests_total{}[$__range])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "total", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Time it takes to process a request", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - 
"lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "A" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "super-light-blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "light-blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 6, - "w": 9, - "x": 13, - "y": 11 - }, - "id": 129, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.95, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "P95", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.90, 
sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "P90", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "P99", - "range": true, - "refId": "B" - } - ], - "title": "request latency (percentiles)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "99% of API requests were completed inside this value, while the remaining 1% took longer.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 22, - "y": 11 - }, - "id": 139, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "histogram_quantile(0.99, 
sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "instant": true, - "legendFormat": "P99 {{destination_service_name}}", - "range": false, - "refId": "B" - } - ], - "title": "P99", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Requests per second that resulted in a success (2xx, 3xx).", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-green", - "mode": "fixed" - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 9, - "y": 13 - }, - "id": 147, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(rate(istio_requests_total{response_code=~\"2.*|3.*\"}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "success", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": 
"Total number of success response (2xx, 3xx).", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-green", - "mode": "fixed" - }, - "decimals": 0, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 11, - "y": 13 - }, - "id": 150, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(increase(istio_requests_total{response_code=~\"2.*|3.*\"}[$__range])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "success", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "95% of API requests were completed inside this value, while the remaining 5% took longer.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-blue", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 22, - "y": 13 - }, - "id": 146, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], 
- "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "histogram_quantile(0.95, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "P95", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Requests per second that resulted in an error (4xx, 5xx).", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-yellow", - "mode": "fixed" - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 9, - "y": 15 - }, - "id": 148, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(rate(istio_requests_total{response_code=~\"4.*|5.*\"}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - 
"hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "error", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of error response (4xx, 5xx).", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-yellow", - "mode": "fixed" - }, - "decimals": 0, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 11, - "y": 15 - }, - "id": 151, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(increase(istio_requests_total{response_code=~\"4.*|5.*\"}[$__range])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "C" - } - ], - "title": "error", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Shows that 90% of API requests were completed inside this value, while the remaining 10% took longer.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "super-light-blue", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - 
"overrides": [] - }, - "gridPos": { - "h": 2, - "w": 2, - "x": 22, - "y": 15 - }, - "id": 138, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "instant": true, - "legendFormat": "P90 {{destination_service_name}}", - "range": false, - "refId": "A" - } - ], - "title": "P90", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Requests per second, broken down by response code e.g. 
200, 302, 403, 500", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "A" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "light-yellow", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "light-green", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 9, - "x": 0, - "y": 17 - }, - "id": 99, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (cluster_id, destination_service_name) * on(cluster_id, destination_service_name) group_right() (group without(instance, app_kubernetes_io_instance) 
(label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "\"{{cluster_id}}\" total", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (cluster_id, destination_service_name, response_code) * on(cluster_id, destination_service_name) group_left(name) (group without(instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "\"{{cluster_id}}\" {{response_code}}", - "range": true, - "refId": "B" - } - ], - "title": "request breakdown by cluster and code (req/s)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Time it takes to process a request", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - "overrides": [ - { - "matcher": { - "id": "byFrameRefID", - "options": "A" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "super-light-blue", - 
"mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "C" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "light-blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byFrameRefID", - "options": "B" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 9, - "x": 13, - "y": 17 - }, - "id": 157, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.95, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name, cluster_id)) * on(destination_service_name, cluster_id) group_right() (group without(instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "\"{{cluster_id}}\" P95", - "range": true, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name, cluster_id)) * on(destination_service_name, cluster_id) group_right() (group without(instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "\"{{cluster_id}}\" P90", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) 
by (le, destination_service_name, cluster_id)) * on(destination_service_name, cluster_id) group_right() (group without(instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$api\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "\"{{cluster_id}}\" P99", - "range": true, - "refId": "B" - } - ], - "title": "request latency by cluster (percentiles)", - "type": "timeseries" - } - ], - "refresh": "30s", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "kuadrant" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Kuadrant-Thanos-Hub", - "value": "Kuadrant-Thanos-Hub" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(gatewayapi_httproute_labels, name)", - "description": "Name of the API", - "hide": 0, - "includeAll": true, - "label": "API name", - "multi": true, - "name": "api", - "options": [], - "query": { - "query": "label_values(gatewayapi_httproute_labels, name)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "30s", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "App Developer Dashboard", - "uid": "J_sdY4-Ik", - "version": 1, - "weekStart": "" -} diff --git a/dev/kuadrant-operator/examples/dashboards/business_user.json b/dev/kuadrant-operator/examples/dashboards/business_user.json deleted file 
mode 100644 index a5cc6078..00000000 --- a/dev/kuadrant-operator/examples/dashboards/business_user.json +++ /dev/null @@ -1,768 +0,0 @@ -{ - "__requires": [ - { - "type": "panel", - "id": "dashlist", - "name": "Dashboard list", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "9.5.3" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "state-timeline", - "name": "State timeline", - "version": "" - }, - { - "type": "panel", - "id": "table", - "name": "Table", - "version": "" - }, - { - "type": "panel", - "id": "text", - "name": "Text", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 1, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "description": "", - "gridPos": { - "h": 7, - "w": 6, - "x": 0, - "y": 0 - }, - "id": 162, - "options": { - "maxItems": 10, - "query": "", - "showHeadings": false, - "showRecentlyViewed": false, - "showSearch": true, - "showStarred": false, - "tags": [ - "kuadrant" - ] - }, - "pluginVersion": "9.5.3", - "title": "Kuadrant Dashboards", - "type": "dashlist" - }, - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "description": "", - "gridPos": { - "h": 7, - "w": 18, - "x": 6, - "y": 0 - }, - "id": 150, - "options": { - "code": { - "language": "plaintext", - "showLineNumbers": false, - "showMiniMap": false - }, - "content": 
"#### Business Overview of APIs\n\nThe panels below are grouped by APIs, realized by a [Gateway API HTTPRoute](https://gateway-api.sigs.k8s.io/concepts/api-overview/#httproute). Each panel provides a comprehensive overview of request and error metrics associated with each API. Additionally, these panels include a heatmap that visualizes requests per second, offering real-time insights into the traffic patterns and operational status of your services.\n\n*Important: HTTPRoutes must include a \"service\" and \"deployment\" label with a value that matches the name of the service & deployment being routed to. eg. \"service=myapp, deployment=myapp\"*\n", - "mode": "markdown" - }, - "pluginVersion": "9.5.3", - "title": "Business User Dashboard", - "type": "text" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 7 - }, - "id": 6, - "panels": [], - "repeat": "route_name", - "title": "\"$route_name\" API", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Aggregated rate of requests per API (HTTPRoute). 
The API name can be cross referenced with the API list to see additional details.\n\nNote: HTTPRoutes require a label `deployment` with the name of the corresponding Deployment so that istio request metrics can be paired with HTTPRoute metrics.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 8 - }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "API: {{name}}", - "range": true, - "refId": "A" - } - ], - "title": "Traffic summary (req/sec)", - "type": 
"timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "req/s Heatmap", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-blues" - }, - "custom": { - "fillOpacity": 70, - "lineWidth": 0, - "spanNulls": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 8 - }, - "id": 22, - "options": { - "alignValue": "center", - "legend": { - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "mergeValues": false, - "rowHeight": 0.9, - "showValue": "never", - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "format": "time_series", - "instant": false, - "legendFormat": "most recent", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m] offset $__range)) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "format": "time_series", - "hide": false, - "legendFormat": "previous", - "range": true, - "refId": "B" - } - ], - "title": 
"Historical request comparison (req/sec)", - "type": "state-timeline" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Aggregated rate of requests per endpoint/path.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 15 - }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name, request_url_path) * on(destination_service_name) group_left() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "{{request_url_path}}", - "range": true, - "refId": "A" - } - ], - "title": "Breakdown by path (req/sec)", - "type": "timeseries" - }, - 
{ - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "auto", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 15 - }, - "id": 155, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum by(request_url_path) (increase(istio_requests_total{}[$__range]) * on(destination_service_name) group_left() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$route_name\",exported_namespace=~\"$api_namespace\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\"))))", - "format": "table", - "instant": true, - "range": false, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum by(request_url_path) (increase(istio_requests_total{}[$__range] offset $__range) * on(destination_service_name) group_left() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$route_name\",exported_namespace=~\"$api_namespace\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\"))))", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - 
"expr": "sum by(request_url_path) (increase(istio_requests_total{}[$__range]) * on(destination_service_name) group_left() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$route_name\",exported_namespace=~\"$api_namespace\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))) - sum by(request_url_path) (increase(istio_requests_total{}[$__range] offset $__range) * on(destination_service_name) group_left() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$route_name\",exported_namespace=~\"$api_namespace\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\"))))", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "C" - } - ], - "title": "Total requests for selected range", - "transformations": [ - { - "id": "seriesToColumns", - "options": { - "byField": "request_url_path" - } - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Time 1": true, - "Time 2": true, - "Time 3": true, - "connection_security_policy 1": true, - "connection_security_policy 2": true, - "connection_security_policy 3": true, - "destination_app 1": true, - "destination_app 2": true, - "destination_app 3": true, - "destination_canonical_revision 1": true, - "destination_canonical_revision 2": true, - "destination_canonical_revision 3": true, - "destination_canonical_service 1": true, - "destination_canonical_service 2": true, - "destination_canonical_service 3": true, - "destination_cluster 1": true, - "destination_cluster 2": true, - "destination_cluster 3": true, - "destination_port 1": true, - "destination_port 2": true, - "destination_port 3": true, - "destination_principal 1": true, - "destination_principal 2": true, - "destination_principal 3": true, - "destination_service 1": true, - "destination_service 2": true, - "destination_service 3": true, - "destination_service_name 
1": true, - "destination_service_name 2": true, - "destination_service_name 3": true, - "destination_service_namespace 1": true, - "destination_service_namespace 2": true, - "destination_service_namespace 3": true, - "destination_version 1": true, - "destination_version 2": true, - "destination_version 3": true, - "destination_workload 1": true, - "destination_workload 2": true, - "destination_workload 3": true, - "destination_workload_namespace 1": true, - "destination_workload_namespace 2": true, - "destination_workload_namespace 3": true, - "endpoint 1": true, - "endpoint 2": true, - "endpoint 3": true, - "instance 1": true, - "instance 2": true, - "instance 3": true, - "istio_io_gateway_name 1": true, - "istio_io_gateway_name 2": true, - "istio_io_gateway_name 3": true, - "job 1": true, - "job 2": true, - "job 3": true, - "namespace 1": true, - "namespace 2": true, - "namespace 3": true, - "pod 1": true, - "pod 2": true, - "pod 3": true, - "pod_template_hash 1": true, - "pod_template_hash 2": true, - "pod_template_hash 3": true, - "prometheus 1": true, - "prometheus 2": true, - "prometheus 3": true, - "receive 1": true, - "receive 2": true, - "receive 3": true, - "replica 1": true, - "replica 2": true, - "replica 3": true, - "reporter 1": true, - "reporter 2": true, - "reporter 3": true, - "request_host 1": true, - "request_host 2": true, - "request_host 3": true, - "request_protocol 1": true, - "request_protocol 2": true, - "request_protocol 3": true, - "response_code 1": true, - "response_code 2": true, - "response_code 3": true, - "response_flags 1": true, - "response_flags 2": true, - "response_flags 3": true, - "service 1": true, - "service 2": true, - "service 3": true, - "service_istio_io_canonical_name 1": true, - "service_istio_io_canonical_name 2": true, - "service_istio_io_canonical_name 3": true, - "service_istio_io_canonical_revision 1": true, - "service_istio_io_canonical_revision 2": true, - "service_istio_io_canonical_revision 3": true, - 
"sidecar_istio_io_inject 1": true, - "sidecar_istio_io_inject 2": true, - "sidecar_istio_io_inject 3": true, - "source_canonical_revision 1": true, - "source_canonical_revision 2": true, - "source_canonical_revision 3": true, - "source_canonical_service 1": true, - "source_canonical_service 2": true, - "source_canonical_service 3": true, - "source_cluster 1": true, - "source_cluster 2": true, - "source_cluster 3": true, - "source_principal 1": true, - "source_principal 2": true, - "source_principal 3": true, - "source_workload 1": true, - "source_workload 2": true, - "source_workload 3": true, - "source_workload_namespace 1": true, - "source_workload_namespace 2": true, - "source_workload_namespace 3": true, - "tenant_id 1": true, - "tenant_id 2": true, - "tenant_id 3": true - }, - "indexByName": {}, - "renameByName": { - "Value #A": "Most recent", - "Value #B": "Previous", - "Value #C": "Increase/Decrease", - "request_url_path": "Request path" - } - } - } - ], - "type": "table" - } - ], - "refresh": "30s", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "kuadrant" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Kuadrant-Thanos-Hub", - "value": "Kuadrant-Thanos-Hub" - }, - "hide": 0, - "includeAll": false, - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(kube_namespace_created, exported_namespace)", - "description": "Namespace of HTTPRoute resources", - "hide": 0, - "includeAll": true, - "label": "API/HTTPRoute Namespace", - "multi": true, - "name": "api_namespace", - "options": [], - "query": { - "query": "label_values(kube_namespace_created, exported_namespace)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - 
"sort": 0, - "type": "query" - }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(gatewayapi_httproute_labels, name)", - "description": "Name of the HTTPRoute resource", - "hide": 0, - "includeAll": true, - "label": "API/Route Name", - "multi": true, - "name": "route_name", - "options": [], - "query": { - "query": "label_values(gatewayapi_httproute_labels, name)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-30m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "30s", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Business User Dashboard", - "uid": "jA3LDk-Iz", - "version": 1, - "weekStart": "" -} diff --git a/dev/kuadrant-operator/examples/dashboards/controller-resources-metrics.json b/dev/kuadrant-operator/examples/dashboards/controller-resources-metrics.json deleted file mode 100644 index 1658cc8e..00000000 --- a/dev/kuadrant-operator/examples/dashboards/controller-resources-metrics.json +++ /dev/null @@ -1,339 +0,0 @@ -{ - "__requires": [ - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "fieldConfig": { - "defaults": { - "color": 
{ - "mode": "continuous-GrYlRd" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 20, - "gradientMode": "scheme", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 0 - }, - "id": 2, - "interval": "1m", - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.4.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "rate(process_cpu_seconds_total{job=\"$job\", namespace=\"$namespace\", pod=\"$pod\"}[5m]) * 100", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "Pod: {{pod}} | Container: {{container}}", - "refId": "A", - "step": 10 - } - ], - "title": "Controller CPU Usage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 20, - "gradientMode": "scheme", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - 
"showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 0 - }, - "id": 4, - "interval": "1m", - "links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.4.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "process_resident_memory_bytes{job=\"$job\", namespace=\"$namespace\", pod=\"$pod\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "Pod: {{pod}} | Container: {{container}}", - "refId": "A", - "step": 10 - } - ], - "title": "Controller Memory Usage", - "type": "timeseries" - } - ], - "refresh": "", - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Kuadrant-Thanos-Hub", - "value": "Kuadrant-Thanos-Hub" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\"}, job)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "job", - "options": [], - "query": { - "query": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\"}, job)", - "refId": "StandardVariableQuery" - }, - "refresh": 2, - "regex": "", - "skipUrlSync": 
false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": false, - "text": "observability", - "value": "observability" - }, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(controller_runtime_reconcile_total, namespace)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "namespace", - "options": [], - "query": { - "query": "label_values(controller_runtime_reconcile_total, namespace)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": false, - "text": "All", - "value": "$__all" - }, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\", job=\"$job\"}, pod)", - "hide": 2, - "includeAll": true, - "label": "pod", - "multi": true, - "name": "pod", - "options": [], - "query": { - "query": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\", job=\"$job\"}, pod)", - "refId": "StandardVariableQuery" - }, - "refresh": 2, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "Controller-Resources-Metrics", - "weekStart": "" -} diff --git a/dev/kuadrant-operator/examples/dashboards/controller-runtime-metrics.json b/dev/kuadrant-operator/examples/dashboards/controller-runtime-metrics.json deleted file mode 100644 index e2c8ec6e..00000000 --- a/dev/kuadrant-operator/examples/dashboards/controller-runtime-metrics.json +++ /dev/null @@ -1,776 +0,0 @@ -{ - "__requires": [ - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - 
"iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "links": [], - "liveNow": false, - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 9, - "panels": [], - "title": "Reconciliation Metrics", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of reconciliations per controller", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 20, - "gradientMode": "scheme", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "cpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "sum(rate(controller_runtime_reconcile_total{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, pod)", - "interval": "", - "legendFormat": "{{instance}} {{pod}}", - "range": true, - "refId": "A" - } 
- ], - "title": "Total Reconciliation Count Per Controller", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of reconciliation errors per controller", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 20, - "gradientMode": "scheme", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "cpm" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 1 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": true, - "expr": "sum(rate(controller_runtime_reconcile_errors_total{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, pod)", - "interval": "", - "legendFormat": "{{instance}} {{pod}}", - "range": true, - "refId": "A" - } - ], - "title": "Reconciliation Error Count Per Controller", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 8 - }, - "id": 11, - "panels": [], - "title": "Work Queue Metrics", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "How long in seconds an item stays 
in workqueue before being requested", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 9 - }, - "id": 13, - "options": { - "legend": { - "calcs": [ - "max", - "mean" - ], - "displayMode": "list", - "placement": "right" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.50, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name, le))", - "interval": "", - "legendFormat": "P50 {{name}} {{instance}} ", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.90, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name, le))", - "hide": false, - "interval": "", - "legendFormat": "P90 {{name}} {{instance}} ", - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"$job\", 
namespace=\"$namespace\"}[5m])) by (instance, name, le))", - "hide": false, - "interval": "", - "legendFormat": "P99 {{name}} {{instance}} ", - "refId": "C" - } - ], - "title": "Seconds For Items Stay In Queue (before being requested) (P50, P90, P99)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 20, - "gradientMode": "scheme", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 9 - }, - "id": 15, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "8.4.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "sum(rate(workqueue_adds_total{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name)", - "interval": "", - "legendFormat": "{{name}} {{instance}}", - "refId": "A" - } - ], - "title": "Work Queue Add Rate", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "How long in seconds processing an item from workqueue takes.", - "fieldConfig": { - "defaults": { - "color": { - "mode": 
"palette-classic" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 10, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 16 - }, - "id": 19, - "options": { - "legend": { - "calcs": [ - "max", - "mean" - ], - "displayMode": "table", - "placement": "right" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.50, sum(rate(workqueue_work_duration_seconds_bucket{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name, le))", - "interval": "", - "legendFormat": "P50 {{name}} {{instance}} ", - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.90, sum(rate(workqueue_work_duration_seconds_bucket{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name, le))", - "hide": false, - "interval": "", - "legendFormat": "P90 {{name}} {{instance}} ", - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "histogram_quantile(0.99, sum(rate(workqueue_work_duration_seconds_bucket{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name, le))", - "hide": false, - "interval": "", - "legendFormat": "P99 
{{name}} {{instance}} ", - "refId": "C" - } - ], - "title": "Seconds Processing Items From WorkQueue (P50, P90, P99)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of retries handled by workqueue", - "fieldConfig": { - "defaults": { - "color": { - "mode": "continuous-GrYlRd" - }, - "custom": { - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 20, - "gradientMode": "scheme", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 3, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 16 - }, - "id": 17, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom" - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "exemplar": true, - "expr": "sum(rate(workqueue_retries_total{job=\"$job\", namespace=\"$namespace\"}[5m])) by (instance, name)", - "interval": "", - "legendFormat": "{{name}} {{instance}} ", - "refId": "A" - } - ], - "title": "Work Queue Retries Rate", - "type": "timeseries" - } - ], - "refresh": "", - "style": "dark", - "tags": [], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Kuadrant-Thanos-Hub", - "value": "Kuadrant-Thanos-Hub" - }, - "hide": 0, - "includeAll": false, - "label": "Datasource", - "multi": false, - "name": "datasource", - "options": [], - 
"query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\"}, job)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "job", - "options": [], - "query": { - "query": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\"}, job)", - "refId": "StandardVariableQuery" - }, - "refresh": 2, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(controller_runtime_reconcile_total, namespace)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "namespace", - "options": [], - "query": { - "query": "label_values(controller_runtime_reconcile_total, namespace)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": true, - "text": [ - "All" - ], - "value": [ - "$__all" - ] - }, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\", job=~\"$job\"}, pod)", - "hide": 2, - "includeAll": true, - "label": "pod", - "multi": true, - "name": "pod", - "options": [], - "query": { - "query": "label_values(controller_runtime_reconcile_total{namespace=~\"$namespace\", job=~\"$job\"}, pod)", - "refId": "StandardVariableQuery" - }, - "refresh": 2, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "Controller-Runtime-Metrics", - "weekStart": "" -} diff --git a/dev/kuadrant-operator/examples/dashboards/dns-operator.json 
b/dev/kuadrant-operator/examples/dashboards/dns-operator.json deleted file mode 100644 index db365658..00000000 --- a/dev/kuadrant-operator/examples/dashboards/dns-operator.json +++ /dev/null @@ -1,622 +0,0 @@ -{ - "__requires": [ - { - "type": "panel", - "id": "gauge", - "name": "Gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "9.5.3" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 9, - "title": "Health Check Probes", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 16, - "w": 3, - "x": 0, - "y": 1 - }, - "id": 7, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": 
"count(kuadrant_dnshealthcheckprobe_healthy_status == 1)", - "format": "time_series", - "instant": false, - "interval": "1", - "legendFormat": "healthy", - "range": true, - "refId": "Healthy" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "count(kuadrant_dnshealthcheckprobe_healthy_status != 1) or vector(0)", - "hide": false, - "instant": true, - "interval": "1", - "legendFormat": "unhealthy", - "range": false, - "refId": "Unhealthy" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "count(kuadrant_dnshealthcheckprobe_healthy_status)", - "hide": false, - "legendFormat": "__auto", - "range": true, - "refId": "probe_count" - } - ], - "title": "Number of active probes", - "transformations": [ - { - "id": "configFromData", - "options": { - "applyTo": { - "id": "byType", - "options": "number" - }, - "configRefId": "probe_count", - "mappings": [ - { - "fieldName": "count(kuadrant_dnshealthcheckprobe_healthy_status)", - "handlerKey": "max" - } - ] - } - } - ], - "type": "gauge" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": 
"red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 21, - "x": 3, - "y": 1 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum by (client, code) (rate(http_client_requests_total{client=\"probe\"}[15m]))", - "legendFormat": "{{code}}", - "range": true, - "refId": "A" - } - ], - "title": "Probe request rate by response code (req/s)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 21, - "x": 3, - "y": 8 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.99, 
sum by(le) (rate(http_client_request_latency_seconds_bucket{client=\"probe\",code=\"200\"}[15m])))", - "legendFormat": "99%ile", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.95, sum by(le) (rate(http_client_request_latency_seconds_bucket{client=\"probe\",code=\"200\"}[15m])))", - "hide": false, - "legendFormat": "95%ile", - "range": true, - "refId": "B" - } - ], - "title": "Probe latency (seconds)", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 8, - "panels": [], - "title": "DNS Provider", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 18 - }, - "id": 5, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": 
"code", - "expr": "histogram_quantile(0.99, sum by(le, client) (rate(http_client_request_latency_seconds_bucket{client=~\"aws|gcp|azure\",code=\"200\"}[15m])))", - "legendFormat": "{{client}}:99%ile", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.95, sum by(le, client) (rate(http_client_request_latency_seconds_bucket{client=~\"aws|gcp|azure\",code=\"200\"}[15m])))", - "hide": false, - "legendFormat": "{{client}}:95%ile", - "range": true, - "refId": "B" - } - ], - "title": "Provider latency (seconds)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisGridShow": true, - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "smooth", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 18 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum by (client, code) 
(rate(http_client_requests_total{client=~\"aws|gcp|azure\"}[15m]))", - "hide": false, - "legendFormat": "{{client}}:{{code}}", - "range": true, - "refId": "A" - } - ], - "title": "Provider request rate by response code (req/s)", - "type": "timeseries" - } - ], - "refresh": "30s", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "kuadrant" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Kuadrant-Thanos-Hub", - "value": "Kuadrant-Thanos-Hub" - }, - "hide": 0, - "includeAll": false, - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - } - ] - }, - "time": { - "from": "now-30m", - "to": "now" - }, - "timepicker": {}, - "timezone": "browser", - "title": "DNS Operator overview", - "uid": "f4f80fb8-6da0-4260-b534-2549cd25210fd", - "version": 1, - "weekStart": "monday" -} diff --git a/dev/kuadrant-operator/examples/dashboards/kustomization.yaml b/dev/kuadrant-operator/examples/dashboards/kustomization.yaml deleted file mode 100644 index 7deaabe2..00000000 --- a/dev/kuadrant-operator/examples/dashboards/kustomization.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -namespace: monitoring - -# grafana-dashboard dashboards as configmaps -configMapGenerator: -- name: grafana-app-developer - files: - - ./app_developer.json -- name: grafana-business-user - files: - - ./business_user.json -- name: grafana-platform-engineer - files: - - ./platform_engineer.json -- name: grafana-controller-runtime-metrics - files: - - ./controller-runtime-metrics.json -- name: grafana-controller-resources-metrics - files: - - ./controller-resources-metrics.json -- name: grafana-dns-operator - files: - - ./dns-operator.json - -generatorOptions: - disableNameSuffixHash: true diff --git a/dev/kuadrant-operator/examples/dashboards/platform_engineer.json 
b/dev/kuadrant-operator/examples/dashboards/platform_engineer.json deleted file mode 100644 index 336d3f2c..00000000 --- a/dev/kuadrant-operator/examples/dashboards/platform_engineer.json +++ /dev/null @@ -1,2616 +0,0 @@ -{ - "__requires": [ - { - "type": "panel", - "id": "dashlist", - "name": "Dashboard list", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "9.5.3" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "state-timeline", - "name": "State timeline", - "version": "" - }, - { - "type": "panel", - "id": "table", - "name": "Table", - "version": "" - }, - { - "type": "panel", - "id": "text", - "name": "Text", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "target": { - "limit": 100, - "matchAny": false, - "tags": [], - "type": "dashboard" - }, - "type": "dashboard" - } - ] - }, - "description": "APIs", - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 7630, - "graphTooltip": 1, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "description": "", - "gridPos": { - "h": 7, - "w": 6, - "x": 0, - "y": 0 - }, - "id": 152, - "options": { - "maxItems": 10, - "query": "", - "showHeadings": false, - "showRecentlyViewed": false, - "showSearch": true, - "showStarred": false, - "tags": [ - "kuadrant" - ] - }, - "pluginVersion": "9.5.3", - "title": "Kuadrant Dashboards", - "type": "dashlist" - }, - { - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - 
"description": "", - "gridPos": { - "h": 7, - "w": 18, - "x": 6, - "y": 0 - }, - "id": 150, - "options": { - "code": { - "language": "plaintext", - "showLineNumbers": false, - "showMiniMap": false - }, - "content": "#### Overview of Gateways, Policies and APIs\n\nThe panels below are grouped by Gateways and APIs. A Gateway is a [Gateway API defined gateway](https://gateway-api.sigs.k8s.io/concepts/api-overview/#gateway) resource. An API is realised by a [Gateway API HTTPRoute](https://gateway-api.sigs.k8s.io/concepts/api-overview/#httproute).\nAny policies [attached to the Gateways and APIs](https://gateway-api.sigs.k8s.io/geps/gep-713/) will be shown, as well as summary request and error metrics for APIs.\n\n*Important: HTTPRoutes must include a \"service\" and \"deployment\" label with a value that matches the name of the service & deployment being routed to. eg. \"service=myapp, deployment=myapp\"*", - "mode": "markdown" - }, - "pluginVersion": "9.5.3", - "title": "Platform Engineer Dashboard", - "type": "text" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 7 - }, - "id": 128, - "panels": [], - "title": "Gateways", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of Gateway API gateways", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 8 - }, - "id": 146, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": 
"${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "count(gatewayapi_gateway_info{exported_namespace=~\"${gateway_namespace}\"})", - "instant": true, - "range": false, - "refId": "A" - } - ], - "title": "Total", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "List of all Gateways, their class and status.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Programmed" - }, - "properties": [ - { - "id": "unit", - "value": "bool" - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Accepted" - }, - "properties": [ - { - "id": "unit", - "value": "bool" - } - ] - } - ] - }, - "gridPos": { - "h": 9, - "w": 21, - "x": 3, - "y": 8 - }, - "id": 115, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "frameIndex": 0, - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_gateway_info{exported_namespace=~\"${gateway_namespace}\"}", - "format": "table", - "instant": true, - "range": false, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_gateway_status{exported_namespace=~\"${gateway_namespace}\",type=\"Programmed\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "B" - }, - { - "datasource": { - "type": 
"prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_gateway_status{exported_namespace=~\"${gateway_namespace}\",type=\"Accepted\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "C" - } - ], - "title": "Gateways", - "transformations": [ - { - "id": "concatenate", - "options": { - "frameNameLabel": "frame", - "frameNameMode": "drop" - } - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Value #A": true, - "__name__ 1": true, - "__name__ 2": true, - "__name__ 3": true, - "cluster_id 2": true, - "cluster_id 3": true, - "container 1": true, - "container 2": true, - "container 3": true, - "customresource_group 1": true, - "customresource_group 2": true, - "customresource_group 3": true, - "customresource_kind 1": true, - "customresource_kind 2": true, - "customresource_kind 3": true, - "customresource_version 1": true, - "customresource_version 2": true, - "customresource_version 3": true, - "exported_namespace 1": false, - "exported_namespace 2": true, - "exported_namespace 3": true, - "instance 1": true, - "instance 2": true, - "instance 3": true, - "job 1": true, - "job 2": true, - "job 3": true, - "name 1": false, - "name 2": true, - "name 3": true, - "namespace 1": true, - "namespace 2": true, - "namespace 3": true, - "prometheus 1": true, - "prometheus 2": true, - "prometheus 3": true, - "receive 1": true, - "receive 2": true, - "receive 3": true, - "replica 1": true, - "replica 2": true, - "replica 3": true, - "tenant_id 1": true, - "tenant_id 2": true, - "tenant_id 3": true, - "type 1": true, - "type 2": true - }, - "indexByName": { - "Time": 1, - "Value #A": 13, - "Value #B": 26, - "Value #C": 39, - "__name__ 1": 2, - "__name__ 2": 14, - "__name__ 3": 27, - "container 1": 3, - "container 2": 15, - "container 3": 28, - "customresource_group 1": 4, - "customresource_group 2": 16, - "customresource_group 3": 29, - 
"customresource_kind 1": 5, - "customresource_kind 2": 17, - "customresource_kind 3": 30, - "customresource_version 1": 6, - "customresource_version 2": 18, - "customresource_version 3": 31, - "exported_namespace 1": 8, - "exported_namespace 2": 19, - "exported_namespace 3": 32, - "gatewayclass_name": 0, - "instance 1": 9, - "instance 2": 20, - "instance 3": 33, - "job 1": 10, - "job 2": 21, - "job 3": 34, - "name 1": 7, - "name 2": 22, - "name 3": 35, - "namespace 1": 11, - "namespace 2": 23, - "namespace 3": 36, - "prometheus 1": 12, - "prometheus 2": 24, - "prometheus 3": 37, - "type 1": 25, - "type 2": 38 - }, - "renameByName": { - "Time": "", - "Value #B": "Programmed", - "Value #C": "Accepted", - "__name__ 3": "", - "cluster_id 1": "Cluster ID", - "exported_namespace 1": "Namespace", - "gatewayclass_name": "Gateway Class", - "job 1": "", - "name 1": "Name" - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total Gateways with an [Accepted and Programmed](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayConditionType) state of True", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-green", - "mode": "fixed" - }, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 11 - }, - "id": 147, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": 
"count(gatewayapi_gateway_status{exported_namespace=~\"${gateway_namespace}\",type=\"Accepted\"} > 0 and ignoring(type) gatewayapi_gateway_status{exported_namespace=~\"${gateway_namespace}\",type=\"Programmed\"} > 0)", - "instant": true, - "range": false, - "refId": "A" - } - ], - "title": "Healthy", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total Gateways with a False or missing [Accepted or Programmed](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1.GatewayConditionType) state.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-yellow", - "mode": "fixed" - }, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 3, - "x": 0, - "y": 14 - }, - "id": 148, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "count(\n(gatewayapi_gateway_info{exported_namespace=~\"${gateway_namespace}\"} * on(name, exported_namespace, instance) group_left gatewayapi_gateway_status{exported_namespace=~\"${gateway_namespace}\",type=\"Programmed\"} < 1)\nor ignoring(type) \n(gatewayapi_gateway_info{exported_namespace=~\"${gateway_namespace}\"} * on(name, exported_namespace, instance) group_left gatewayapi_gateway_status{exported_namespace=~\"${gateway_namespace}\",type=\"Accepted\"} < 1)\n)", - "instant": true, - "range": false, - "refId": "A" - } - ], - "title": "Unhealthy", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, 
- "description": "List of all listeners in Gateways", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 17 - }, - "id": 154, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "frameIndex": 0, - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_gateway_listener_info{exported_namespace=~\"${gateway_namespace}\"}", - "format": "table", - "instant": true, - "range": false, - "refId": "A" - } - ], - "title": "Gateway Listeners", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Value": true, - "Value #A": true, - "Value #B": true, - "Value #C": true, - "Value #D": true, - "__name__": true, - "container": true, - "customresource_group": true, - "customresource_kind": true, - "customresource_version": true, - "instance": true, - "job": true, - "namespace": false, - "prometheus": true, - "receive": true, - "replica": true, - "target_group": true, - "tenant_id": true - }, - "indexByName": { - "Time": 0, - "Value": 21, - "__name__": 1, - "allowed_routes_namespaces_from": 19, - "cluster_id": 20, - "container": 2, - "customresource_group": 3, - "customresource_kind": 4, - "customresource_version": 5, - "hostname": 11, - "instance": 7, - "job": 8, - "listener_name": 12, - "name": 6, - "namespace": 9, - "port": 13, - "prometheus": 10, - "protocol": 14, - "receive": 15, - "replica": 16, - "tenant_id": 
17, - "tls_mode": 18 - }, - "renameByName": { - "Value #A": "", - "allowed_routes_namespaces_from": "Allowed Routes NS", - "cluster_id": "Cluster ID", - "customresource_kind": "Kind", - "exported_namespace": "Namespace", - "hostname": "Hostname", - "listener_name": "Listener", - "name": "Gateway", - "namespace": "Gateway NS", - "port": "Port", - "protocol": "Protocol", - "receive": "", - "target_kind": "Target Kind", - "target_name": "Target Name", - "tls_mode": "TLS Mode" - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "List of all Policies targeting a Gateway", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 17 - }, - "id": 117, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "frameIndex": 0, - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_tlspolicy_target_info{exported_namespace=~\"${gateway_namespace}\",target_kind=\"Gateway\"}", - "format": "table", - "instant": true, - "range": false, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_ratelimitpolicy_target_info{exported_namespace=~\"${gateway_namespace}\",target_kind=\"Gateway\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "B" - }, - { - 
"datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_authpolicy_target_info{exported_namespace=~\"${gateway_namespace}\",target_kind=\"Gateway\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_dnspolicy_target_info{exported_namespace=~\"${gateway_namespace}\",target_kind=\"Gateway\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "D" - } - ], - "title": "Gateway Policies", - "transformations": [ - { - "id": "merge", - "options": {} - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Value": true, - "Value #A": true, - "Value #B": true, - "Value #C": true, - "Value #D": true, - "__name__": true, - "container": true, - "customresource_group": true, - "customresource_version": true, - "instance": true, - "job": true, - "namespace": true, - "prometheus": true, - "receive": true, - "replica": true, - "target_group": true, - "tenant_id": true - }, - "indexByName": { - "Time": 0, - "Value #A": 15, - "Value #B": 16, - "__name__": 1, - "container": 2, - "customresource_group": 3, - "customresource_kind": 4, - "customresource_version": 5, - "exported_namespace": 7, - "instance": 8, - "job": 9, - "name": 6, - "namespace": 10, - "prometheus": 11, - "target_group": 12, - "target_kind": 13, - "target_name": 14 - }, - "renameByName": { - "Value #A": "", - "cluster_id": "Cluster ID", - "customresource_kind": "Kind", - "exported_namespace": "Namespace", - "name": "Name", - "namespace": "Namespace", - "receive": "", - "target_kind": "Target Kind", - "target_name": "Target Name" - } - } - } - ], - "type": "table" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 145, - "panels": [], - 
"title": "APIs/HTTPRoutes", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "List of all APIs (HTTPRoutes) and their hostnames & corresponding Deployment names.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 27 - }, - "id": 97, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_httproute_hostname_info{name=~\"${route_name}\",exported_namespace=~\"${api_policy_namespace}\"}", - "format": "table", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_policy_namespace}\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "B" - } - ], - "title": "APIs/HTTPRoutes", - "transformations": [ - { - "id": "concatenate", - "options": { - "frameNameLabel": "frame", - "frameNameMode": "drop" - } - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Value #A": true, - "Value #B": true, - "__name__ 1": true, - "__name__ 2": true, - "cluster_id 2": true, - "container 1": true, - "container 2": true, - "customresource_group 1": true, - 
"customresource_group 2": true, - "customresource_kind 1": true, - "customresource_kind 2": true, - "customresource_version 1": true, - "customresource_version 2": true, - "instance 1": true, - "instance 2": true, - "job 1": true, - "job 2": true, - "name 1": true, - "name 2": true, - "namespace 1": false, - "namespace 2": true, - "prometheus 1": true, - "prometheus 2": true, - "receive 1": true, - "receive 2": true, - "replica 1": true, - "replica 2": true, - "service": true, - "tenant_id 1": true, - "tenant_id 2": true - }, - "indexByName": { - "Time": 0, - "Value #A": 17, - "Value #B": 33, - "__name__ 1": 1, - "__name__ 2": 18, - "cluster_id 1": 9, - "cluster_id 2": 19, - "container 1": 2, - "container 2": 20, - "customresource_group 1": 3, - "customresource_group 2": 21, - "customresource_kind 1": 4, - "customresource_kind 2": 22, - "customresource_version 1": 5, - "customresource_version 2": 23, - "deployment": 6, - "hostname": 7, - "instance 1": 10, - "instance 2": 24, - "job 1": 11, - "job 2": 25, - "name 1": 12, - "name 2": 26, - "namespace 1": 8, - "namespace 2": 27, - "prometheus 1": 13, - "prometheus 2": 28, - "receive 1": 14, - "receive 2": 29, - "replica 1": 15, - "replica 2": 30, - "service": 31, - "tenant_id 1": 16, - "tenant_id 2": 32 - }, - "renameByName": { - "cluster_id 1": "Cluster ID", - "deployment": "Deployment", - "hostname": "Hostname", - "namespace 1": "Namespace", - "prometheus 2": "" - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "List of all Policies targeting HTTPRoutes", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - 
"h": 6, - "w": 12, - "x": 12, - "y": 27 - }, - "id": 118, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "frameIndex": 0, - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_tlspolicy_target_info{exported_namespace=~\"${api_policy_namespace}\",target_kind!=\"Gateway\",target_name=~\"${route_name}\"}", - "format": "table", - "instant": true, - "range": false, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_ratelimitpolicy_target_info{exported_namespace=~\"${api_policy_namespace}\",target_kind!=\"Gateway\",target_name=~\"${route_name}\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "B" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_authpolicy_target_info{exported_namespace=~\"${api_policy_namespace}\",target_kind!=\"Gateway\",target_name=~\"${route_name}\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "C" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "gatewayapi_dnspolicy_target_info{exported_namespace=~\"${api_policy_namespace}\",target_kind!=\"Gateway\",target_name=~\"${route_name}\"}", - "format": "table", - "hide": false, - "instant": true, - "range": false, - "refId": "D" - } - ], - "title": "HTTPRoute Policies", - "transformations": [ - { - "id": "merge", - "options": {} - }, - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Value": true, - "Value #A": true, - "Value #B": true, - 
"Value #C": true, - "Value #D": true, - "__name__": true, - "container": true, - "customresource_group": true, - "customresource_version": true, - "instance": true, - "job": true, - "namespace": false, - "prometheus": true, - "receive": true, - "replica": true, - "target_group": true, - "tenant_id": true - }, - "indexByName": { - "Time": 0, - "Value #A": 15, - "Value #B": 16, - "__name__": 1, - "container": 2, - "customresource_group": 3, - "customresource_kind": 4, - "customresource_version": 5, - "exported_namespace": 7, - "instance": 8, - "job": 9, - "name": 6, - "namespace": 10, - "prometheus": 11, - "target_group": 12, - "target_kind": 13, - "target_name": 14 - }, - "renameByName": { - "Value #A": "", - "cluster_id": "Cluster ID", - "customresource_kind": "Kind", - "exported_namespace": "Namespace", - "name": "Name", - "namespace": "Namespace", - "target_kind": "Target Kind", - "target_name": "Target Name", - "target_namespace": "Target Namespace" - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Aggregated rate of requests per API (HTTPRoute). 
The API name can be cross referenced with the API list to see additional details.\n\nNote: HTTPRoutes require a label `deployment` with the name of the corresponding Deployment so that istio request metrics can be paired with HTTPRoute metrics.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 33 - }, - "id": 120, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_policy_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "API: {{name}}", - "range": true, - "refId": "A" - } - ], - "title": "Total requests (req/sec)", - 
"type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Aggregated rate of requests per API (HTTPRoute). The API name can be cross referenced with the API list to see additional details.\n\nNote: HTTPRoutes require a label `deployment` with the name of the corresponding Deployment so that istio request metrics can be paired with HTTPRoute metrics.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 33 - }, - "id": 153, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{}[5m])) by (destination_service_name) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"${route_name}\",exported_namespace=~\"${api_policy_namespace}\"}, 
\"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "API: {{name}}", - "range": true, - "refId": "A" - } - ], - "title": "Total requests (req/sec)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Aggregated request latencies per API (HTTPRoute). 99th and 95th percentiles are shown. The API name can be cross referenced with the API list to see additional details.\n\nNote: HTTPRoutes require a label `deployment` with the name of the corresponding Deployment so that istio request metrics can be paired with HTTPRoute metrics.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineStyle": { - "fill": "solid" - }, - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 33 - }, - "id": 137, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, 
destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$route_name\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "{{service}} - 99th %ile", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "histogram_quantile(0.95, sum(irate(istio_request_duration_milliseconds_bucket{}[5m])) by (le, destination_service_name)) * on(destination_service_name) group_right() (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{name=~\"$route_name\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "{{service}} - 95th %ile", - "range": true, - "refId": "C" - } - ], - "title": "Request latency", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "$datasource" - }, - "description": "CPU Usage of all workloads (Deployments) linked to a HTTPRoute. 
That is, they have a corresponding HTTPRoute with a deployment label.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "short" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "quota - requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#F2495C", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 0 - }, - { - "id": "custom.lineWidth", - "value": 2 - }, - { - "id": "custom.stacking", - "value": { - "group": false, - "mode": "normal" - } - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [ - 10, - 10 - ], - "fill": "dash" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "quota - limits" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#FF9830", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 0 - }, - { - "id": "custom.lineWidth", - "value": 2 - }, - { - "id": "custom.stacking", - "value": { - "group": false, - "mode": "normal" - } - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [ - 10, - 10 - ], - "fill": "dash" - } - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 0, - "y": 40 - }, - "id": 141, - "interval": "1m", - 
"links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "8.5.5", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace=~\"$api_policy_namespace\"}\n* on(namespace, pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$api_policy_namespace\", workload_type=\"deployment\"}\n) by (workload, workload_type)\n* on(workload) (group by(workload) (label_replace(gatewayapi_httproute_labels{exported_namespace=~\"$api_policy_namespace\",name=~\"$route_name\"}, \"workload\", \"$1\",\"deployment\", \"(.+)\")))", - "format": "time_series", - "legendFormat": "API: {{workload}}", - "range": true, - "refId": "A" - } - ], - "title": "CPU Usage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "$datasource" - }, - "description": "Memory Usage of all workloads (Deployments) linked to a HTTPRoute. 
That is, they have a corresponding HTTPRoute with a deployment label.", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "blue", - "mode": "fixed" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "normal" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 0, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "quota - requests" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#F2495C", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 0 - }, - { - "id": "custom.lineWidth", - "value": 2 - }, - { - "id": "custom.stacking", - "value": { - "group": false, - "mode": "normal" - } - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [ - 10, - 10 - ], - "fill": "dash" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "quota - limits" - }, - "properties": [ - { - "id": "color", - "value": { - "fixedColor": "#FF9830", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 0 - }, - { - "id": "custom.lineWidth", - "value": 2 - }, - { - "id": "custom.stacking", - "value": { - "group": false, - "mode": "normal" - } - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [ - 10, - 10 - ], - "fill": "dash" - } - } - ] - } - ] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 8, - "y": 40 - }, - "id": 143, - "interval": "1m", - 
"links": [], - "options": { - "legend": { - "calcs": [], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "8.5.5", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(\n container_memory_working_set_bytes{metrics_path=\"/metrics/cadvisor\", namespace=~\"$api_policy_namespace\", container!=\"\", image!=\"\"}\n * on(namespace, namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$api_policy_namespace\", workload_type=\"deployment\"}\n) by (workload, workload_type)\n* on(workload) (group by(workload) (label_replace(gatewayapi_httproute_labels{exported_namespace=~\"$api_policy_namespace\",name=~\"$route_name\"}, \"workload\", \"$1\",\"deployment\", \"(.+)\")))", - "format": "time_series", - "legendFormat": "API: {{workload}}", - "range": true, - "refId": "A" - } - ], - "title": "Memory Usage", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Rate of 4xx & 5xx response codes and total HTTP response code errors by API/HTTPRoute", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "light-yellow", - "mode": "continuous-YlRd" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "axisSoftMin": 0, - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 5, - "gradientMode": "opacity", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 1, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "decimals": 2, - "mappings": [], - "thresholds": { - 
"mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "reqps" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 16, - "y": 40 - }, - "id": 139, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{response_code=~\"4.*|5.*\"}[5m])) by (destination_service_name, response_code) * on(destination_service_name) group_left(name) (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{exported_namespace=~\"${api_policy_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "legendFormat": "{{name}} - {{response_code}}", - "range": true, - "refId": "A" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "expr": "sum(rate(istio_requests_total{response_code=~\"4.*|5.*\"}[5m])) by (destination_service_name) * on(destination_service_name) group_left(name) (group without(cluster_id, instance, app_kubernetes_io_instance) (label_replace(gatewayapi_httproute_labels{exported_namespace=~\"${api_policy_namespace}\"}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")))", - "hide": false, - "legendFormat": "{{name}} - Total Errors", - "range": true, - "refId": "B" - } - ], - "title": "Errors (req/s)", - "type": "timeseries" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 47 - }, - "id": 155, - "panels": [], - "title": "Alerts", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of firing alerts, grouped by Alert Name. 
Note that there may be more than 1 instance of an alert active based on different labels (like pods or namespace). These will only be counted once here if the alertname is the same for each one.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "semi-dark-red", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 0, - "y": 48 - }, - "id": 156, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "count(sum by(alertname, alertstate, severity, cluster_id) (ALERTS{alertstate=\"firing\"}))", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - } - ], - "title": "Total Firing", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Currently pending or firing alerts, grouped by Alert Name. Note that there may be more than 1 instance of an alert active based on different labels (like pods or namespace). The total number of instances of an alert in that state is shown in the '# Active` column. 
For further details on the individual alert instances, check your alerting system.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "filterable": true, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "alertstate" - }, - "properties": [ - { - "id": "mappings", - "value": [ - { - "options": { - "firing": { - "color": "semi-dark-red", - "index": 1, - "text": "Firing" - }, - "pending": { - "color": "orange", - "index": 0, - "text": "Pending" - } - }, - "type": "value" - } - ] - }, - { - "id": "custom.cellOptions", - "value": { - "mode": "basic", - "type": "color-background" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 21, - "x": 3, - "y": 48 - }, - "id": 157, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [] - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum by(alertname, alertstate, severity, cluster_id) (ALERTS{alertstate=\"firing\"} or ALERTS{alertstate=\"pending\"})", - "format": "table", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - } - ], - "title": "Currently Active", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true, - "Value": false, - "__name__": true, - "container": true, - "endpoint": true, - "instance": true, - "job": true, - "namespace": true, - "pod": true, - "prometheus": true, - "receive": true, - "replica": true, - "rule_group": true, - "service": true, - "tenant_id": true - }, - "indexByName": { - "Time": 0, - "Value": 2, - 
"alertname": 1, - "alertstate": 3, - "cluster_id": 4, - "severity": 5 - }, - "renameByName": { - "Value": "# Active", - "alertname": "Name", - "alertstate": "State", - "cluster_id": "Cluster ID", - "severity": "Severity" - } - } - }, - { - "id": "sortBy", - "options": { - "fields": {}, - "sort": [ - { - "field": "Name" - } - ] - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Total number of pending alerts, grouped by Alert Name. Note that there may be more than 1 instance of an alert active based on different labels (like pods or namespace). These will only be counted once here if the alertname is the same for each one.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "noValue": "0", - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "orange", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 3, - "x": 0, - "y": 52 - }, - "id": 159, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "textMode": "auto" - }, - "pluginVersion": "9.5.3", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "count(sum by(alertname, alertstate, severity, cluster_id) (ALERTS{alertstate=\"pending\"}))", - "instant": true, - "legendFormat": "__auto", - "range": false, - "refId": "A" - } - ], - "title": "Total Pending", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "description": "Historical state of any alerts that were pending or firing, grouped by Alert Name. Note that more than 1 instance of an alert may have been active based on different labels (like pods or namespace). 
For further details on the individual alert instances, check your alerting system.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "fillOpacity": 100, - "lineWidth": 0, - "spanNulls": false - }, - "mappings": [ - { - "options": { - "firing": { - "color": "semi-dark-red", - "index": 1, - "text": "Firing" - }, - "pending": { - "color": "orange", - "index": 0, - "text": "Pending" - } - }, - "type": "value" - }, - { - "options": { - "match": "empty", - "result": { - "color": "light-green", - "index": 2, - "text": "Inactive" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "light-green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 56 - }, - "id": 158, - "options": { - "alignValue": "center", - "legend": { - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "mergeValues": true, - "rowHeight": 0.9, - "showValue": "auto", - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "editorMode": "code", - "exemplar": false, - "expr": "sum by(alertname, alertstate, severity, cluster_id) (ALERTS{alertstate=\"firing\"} or ALERTS{alertstate=\"pending\"})", - "format": "table", - "instant": false, - "legendFormat": "{{alertname}}", - "range": true, - "refId": "A" - } - ], - "title": "Active State Timeline", - "transformations": [ - { - "id": "groupingToMatrix", - "options": { - "columnField": "alertname", - "emptyValue": "empty", - "rowField": "Time", - "valueField": "alertstate" - } - }, - { - "id": "convertFieldType", - "options": { - "conversions": [ - { - "destinationType": "time", - "targetField": "Time\\alertname" - } - ], - "fields": {} - } - }, - { - "id": "sortBy", - "options": { - "fields": {}, - "sort": [ - { - "desc": false, - "field": "Time\\alertname" - } - ] - } - } - ], - 
"type": "state-timeline" - } - ], - "refresh": "30s", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "kuadrant" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "Kuadrant-Thanos-Hub", - "value": "Kuadrant-Thanos-Hub" - }, - "hide": 0, - "includeAll": false, - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "queryValue": "", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "current": { - "selected": false, - "text": "All", - "value": "$__all" - }, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(gatewayapi_gateway_info, exported_namespace)", - "description": "", - "hide": 0, - "includeAll": true, - "label": "Gateway Namespace", - "multi": true, - "name": "gateway_namespace", - "options": [], - "query": { - "query": "label_values(gatewayapi_gateway_info, exported_namespace)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(kube_namespace_created, exported_namespace)", - "description": "Namespace of HTTPRoute & Policy resources", - "hide": 0, - "includeAll": true, - "label": "API/Route & Policy Namespace", - "multi": true, - "name": "api_policy_namespace", - "options": [], - "query": { - "query": "label_values(kube_namespace_created, exported_namespace)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${datasource}" - }, - "definition": "label_values(gatewayapi_httproute_labels, name)", - "description": "Name of the HTTPRoute resource", - "hide": 0, - "includeAll": true, - "label": "API/Route Name", - "multi": true, - "name": "route_name", - 
"options": [], - "query": { - "query": "label_values(gatewayapi_httproute_labels, name)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "30s", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Platform Engineer Dashboard", - "uid": "djqDaDISk", - "version": 1, - "weekStart": "" -} diff --git a/dev/kuadrant-operator/examples/dnspolicy/application.yaml b/dev/kuadrant-operator/examples/dnspolicy/application.yaml deleted file mode 100644 index e7273e54..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/application.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: my-route-${DNSPOLICY_NAMESPACE} -spec: - parentRefs: - - kind: Gateway - name: prod-web-istio - namespace: ${DNSPOLICY_NAMESPACE} - hostnames: - - "app.${KUADRANT_ZONE_ROOT_DOMAIN}" - rules: - - backendRefs: - - name: echo - port: 8080 ---- -apiVersion: v1 -kind: Service -metadata: - name: echo -spec: - ports: - - name: http-port - port: 8080 - targetPort: http-port - protocol: TCP - selector: - app: echo ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: echo -spec: - replicas: 1 - selector: - matchLabels: - app: echo - template: - metadata: - labels: - app: echo - spec: - containers: - - name: echo - image: mirror.gcr.io/jmalloc/echo-server - ports: - - name: http-port - containerPort: 8080 - protocol: TCP diff --git a/dev/kuadrant-operator/examples/dnspolicy/aws-dns-provider-secret.yaml b/dev/kuadrant-operator/examples/dnspolicy/aws-dns-provider-secret.yaml deleted file mode 100644 index 82a488d5..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/aws-dns-provider-secret.yaml +++ /dev/null @@ -1,10 
+0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: aws-credentials - namespace: ${DNSPOLICY_NAMESPACE} -type: "kuadrant.io/aws" -stringData: - AWS_ACCESS_KEY_ID: ${KUADRANT_AWS_ACCESS_KEY_ID} - AWS_SECRET_ACCESS_KEY: ${KUADRANT_AWS_SECRET_ACCESS_KEY} - AWS_REGION: ${KUADRANT_AWS_REGION} diff --git a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-bad-strategy.yaml b/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-bad-strategy.yaml deleted file mode 100644 index a5c2f17e..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-bad-strategy.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: kuadrant.io/v1 -kind: DNSPolicy -metadata: - name: prod-web - namespace: ${DNSPOLICY_NAMESPACE} -spec: - targetRef: - name: prod-web-istio - group: gateway.networking.k8s.io - kind: Gateway - providerRefs: - - name: aws-credentials diff --git a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-exclude-address.yaml b/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-exclude-address.yaml deleted file mode 100644 index a2cdc8ac..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-exclude-address.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: kuadrant.io/v1 -kind: DNSPolicy -metadata: - name: prod-web - namespace: ${DNSPOLICY_NAMESPACE} -spec: - targetRef: - name: prod-web-istio - group: gateway.networking.k8s.io - kind: Gateway - providerRefs: - - name: aws-credentials - loadBalancing: - weight: 120 - geo: EU - defaultGeo: true - excludeAddresses: - - "10.89.0.0/16" - - "some.local.domain" - - "127.0.0.1" diff --git a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-healthchecks.yaml b/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-healthchecks.yaml deleted file mode 100644 index d67110b5..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy-healthchecks.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: kuadrant.io/v1 -kind: DNSPolicy -metadata: - name: prod-web - namespace: ${DNSPOLICY_NAMESPACE} -spec: - targetRef: - name: 
prod-web-istio - group: gateway.networking.k8s.io - kind: Gateway - providerRefs: - - name: aws-credentials - loadBalancing: - weight: 120 - geo: EU - defaultGeo: true - healthCheck: - path: /healthz - port: 443 - protocol: HTTPS - failureThreshold: 3 diff --git a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy.yaml b/dev/kuadrant-operator/examples/dnspolicy/dnspolicy.yaml deleted file mode 100644 index d6bfa8e9..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/dnspolicy.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: kuadrant.io/v1 -kind: DNSPolicy -metadata: - name: prod-web - namespace: ${DNSPOLICY_NAMESPACE} -spec: - targetRef: - name: prod-web-istio - group: gateway.networking.k8s.io - kind: Gateway - providerRefs: - - name: aws-credentials - loadBalancing: - weight: 120 - geo: EU - defaultGeo: true diff --git a/dev/kuadrant-operator/examples/dnspolicy/gateway.yaml b/dev/kuadrant-operator/examples/dnspolicy/gateway.yaml deleted file mode 100644 index e5db812b..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/gateway.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: gateway.networking.k8s.io/v1 -kind: Gateway -metadata: - name: prod-web-istio - namespace: ${DNSPOLICY_NAMESPACE} -spec: - gatewayClassName: istio - listeners: - - allowedRoutes: - namespaces: - from: All - name: api - hostname: app.${KUADRANT_ZONE_ROOT_DOMAIN} - port: 443 - protocol: HTTPS - tls: - mode: Terminate - certificateRefs: - - name: example-app-tls - kind: Secret diff --git a/dev/kuadrant-operator/examples/dnspolicy/script.sh b/dev/kuadrant-operator/examples/dnspolicy/script.sh deleted file mode 100644 index ffa5c5b3..00000000 --- a/dev/kuadrant-operator/examples/dnspolicy/script.sh +++ /dev/null @@ -1,27 +0,0 @@ -export DNSPOLICY_NAMESPACE=gateway -kubectl create ns gateway -envsubst < examples/dnspolicy/aws-dns-provider-secret.yaml | kubectl apply -f - -envsubst < examples/dnspolicy/gateway.yaml | kubectl apply -f - -envsubst < examples/dnspolicy/dnspolicy.yaml | 
kubectl apply -f - -envsubst < examples/dnspolicy/application.yaml | kubectl apply -f - - -read -r -p "press enter to cause conflict" -export DNSPOLICY_NAMESPACE=gateway-2 -kubectl create ns gateway-2 -envsubst < examples/dnspolicy/aws-dns-provider-secret.yaml | kubectl apply -f - -envsubst < examples/dnspolicy/gateway.yaml | kubectl apply -f - -envsubst < examples/dnspolicy/dnspolicy-bad-strategy.yaml | kubectl apply -f - -envsubst < examples/dnspolicy/application.yaml | kubectl apply -f - - -read -r -p "press enter to delete conflict" -kubectl delete ns gateway-2 - -read -r -p "press enter to configure bad health checks" -export DNSPOLICY_NAMESPACE=gateway -envsubst < examples/dnspolicy/dnspolicy-healthchecks.yaml | kubectl apply -f - - -read -r -p "press enter to configure good health checks" -kubectl patch dnspolicy prod-web -n ${DNSPOLICY_NAMESPACE} --type='json' -p='[{"op": "replace", "path": "/spec/healthCheck/port", "value":80}]' - -read -r -p "press enter to clean up sample" -kubectl delete ns ${DNSPOLICY_NAMESPACE} diff --git a/dev/kuadrant-operator/examples/external-api-istio.yaml b/dev/kuadrant-operator/examples/external-api-istio.yaml deleted file mode 100644 index 1855043c..00000000 --- a/dev/kuadrant-operator/examples/external-api-istio.yaml +++ /dev/null @@ -1,94 +0,0 @@ -## Note before using this example you will need cert-manager and a clusterissuer setup -apiVersion: networking.istio.io/v1beta1 -kind: ServiceEntry -metadata: - name: external-api -spec: - hosts: - - kuadrant.io - location: MESH_EXTERNAL - resolution: DNS - ports: - - number: 80 - name: http - protocol: HTTP - - number: 443 - name: https - protocol: TLS ---- -apiVersion: networking.istio.io/v1 -kind: DestinationRule -metadata: - name: external-api -spec: - host: 'kuadrant.io' - trafficPolicy: - tls: - mode: SIMPLE - sni: kuadrant.io ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: Gateway -metadata: - name: public-api -spec: - gatewayClassName: istio - listeners: - - name: 
public-tls - port: 443 - hostname: 'public.cb.hcpapps.net' - protocol: HTTPS - allowedRoutes: - namespaces: - from: All - tls: - mode: Terminate - certificateRefs: - - name: public - kind: Secret ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: public-api -spec: - parentRefs: - - name: public-api - hostnames: - - 'public.cb.hcpapps.net' - rules: - - backendRefs: - - name: kuadrant.io - kind: Hostname - group: networking.istio.io - port: 443 - filters: - - type: URLRewrite - urlRewrite: - hostname: kuadrant.io ---- -apiVersion: kuadrant.io/v1 -kind: TLSPolicy -metadata: - name: tls -spec: - targetRef: - name: public-api - group: gateway.networking.k8s.io - kind: Gateway - issuerRef: - group: cert-manager.io - kind: ClusterIssuer - name: lets-encrypt-aws ---- -apiVersion: kuadrant.io/v1 -kind: DNSPolicy -metadata: - name: dns -spec: - targetRef: - group: gateway.networking.k8s.io - kind: Gateway - name: public-api - providerRefs: - - name: aws-provider-credentials diff --git a/dev/kuadrant-operator/examples/metal-lb.yaml b/dev/kuadrant-operator/examples/metal-lb.yaml deleted file mode 100644 index a2e5e41d..00000000 --- a/dev/kuadrant-operator/examples/metal-lb.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: metallb.io/v1beta1 -kind: IPAddressPool -metadata: - name: example - namespace: metallb-system -spec: - addresses: - - 172.31.200.0/24 ---- -apiVersion: metallb.io/v1beta1 -kind: L2Advertisement -metadata: - name: empty - namespace: metallb-system diff --git a/dev/kuadrant-operator/examples/oas-apikey.yaml b/dev/kuadrant-operator/examples/oas-apikey.yaml deleted file mode 100644 index 524dfb0f..00000000 --- a/dev/kuadrant-operator/examples/oas-apikey.yaml +++ /dev/null @@ -1,48 +0,0 @@ ---- -openapi: "3.1.0" -info: - title: "Pet Store API" - version: "1.0.0" - x-kuadrant: - route: - name: "toystore" - namespace: $devNS - hostnames: - - toystore.$rootDomain - parentRefs: - - name: $gatewayName - namespace: $gatewayNS -servers: - 
- url: https://toystore.$rootDomain/v1 -paths: - /toys: - get: # No sec requirements - operationId: "getToy" - responses: - 405: - description: "invalid input" - post: # API key - operationId: "postToy" - security: - - api_key: [] - responses: - 405: - description: "invalid input" - x-kuadrant: - backendRefs: - - name: toystore - namespace: $devNS - port: 80 - rate_limit: - rates: - - limit: 1 - duration: 10 - unit: second - counters: - - request.headers.api_key -components: - securitySchemes: - api_key: - type: apiKey - name: api_key - in: header diff --git a/dev/kuadrant-operator/examples/oas-oidc.yaml b/dev/kuadrant-operator/examples/oas-oidc.yaml deleted file mode 100644 index 0e9199e7..00000000 --- a/dev/kuadrant-operator/examples/oas-oidc.yaml +++ /dev/null @@ -1,55 +0,0 @@ ---- -openapi: "3.1.0" -info: - title: "Pet Store API" - version: "1.0.0" - x-kuadrant: - route: - name: "toystore" - namespace: $devNS - hostnames: - - toystore.$rootDomain - parentRefs: - - name: $gatewayName - namespace: $gatewayNS -servers: - - url: https://toystore.$rootDomain/v1 -paths: - /toys: - get: # No sec requirements - operationId: "getToy" - responses: - 405: - description: "invalid input" - x-kuadrant: - rate_limit: - rates: - - limit: 1 - duration: 5 - unit: second - counters: - - request.headers.Authorization - post: # OIDC protected - operationId: "postToy" - security: - - oidc: [] - responses: - 405: - description: "invalid input" - x-kuadrant: - backendRefs: - - name: toystore - namespace: $devNS - port: 80 - rate_limit: - rates: - - limit: 1 - duration: 10 - unit: second - counters: - - request.headers.Authorization -components: - securitySchemes: - oidc: - type: openIdConnect - openIdConnectUrl: https://$openIDHost/auth/realms/toystore diff --git a/dev/kuadrant-operator/examples/toystore/admin-key-secret.yaml b/dev/kuadrant-operator/examples/toystore/admin-key-secret.yaml deleted file mode 100644 index 7893c1e2..00000000 --- 
a/dev/kuadrant-operator/examples/toystore/admin-key-secret.yaml +++ /dev/null @@ -1,15 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - annotations: - secret.kuadrant.io/user-id: alice - creationTimestamp: null - labels: - app: toystore - admin: "yes" - authorino.kuadrant.io/managed-by: authorino - name: toystore-admin-apikey -stringData: - api_key: IAMADMIN -type: Opaque diff --git a/dev/kuadrant-operator/examples/toystore/alice-api-key-secret.yaml b/dev/kuadrant-operator/examples/toystore/alice-api-key-secret.yaml deleted file mode 100644 index 744e029b..00000000 --- a/dev/kuadrant-operator/examples/toystore/alice-api-key-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - annotations: - secret.kuadrant.io/user-id: alice - creationTimestamp: null - labels: - app: toystore - authorino.kuadrant.io/managed-by: authorino - name: toystore-alice-apikey -stringData: - api_key: ALICEKEYFORDEMO -type: Opaque diff --git a/dev/kuadrant-operator/examples/toystore/authpolicy.yaml b/dev/kuadrant-operator/examples/toystore/authpolicy.yaml deleted file mode 100644 index fa046aea..00000000 --- a/dev/kuadrant-operator/examples/toystore/authpolicy.yaml +++ /dev/null @@ -1,50 +0,0 @@ ---- -apiVersion: kuadrant.io/v1 -kind: AuthPolicy -metadata: - name: toystore -spec: - targetRef: - group: gateway.networking.k8s.io - kind: HTTPRoute - name: toystore - rules: - authentication: - "apikey": - apiKey: - selector: - matchLabels: - app: toystore - credentials: - authorizationHeader: - prefix: APIKEY - response: - success: - filters: - "ext_auth_data": - json: - properties: - "user-id": - selector: auth.identity.metadata.annotations.secret\.kuadrant\.io/user-id ---- -apiVersion: kuadrant.io/v1 -kind: AuthPolicy -metadata: - name: toystore - namespace: gateway-system -spec: - targetRef: - group: gateway.networking.k8s.io - kind: Gateway - name: kuadrant-ingressgateway - rules: - authentication: - "apikey": - apiKey: - selector: - matchLabels: - 
app: toystore - admin: "yes" - credentials: - authorizationHeader: - prefix: APIKEY diff --git a/dev/kuadrant-operator/examples/toystore/authpolicy_jwt-k8s-authnz.yaml b/dev/kuadrant-operator/examples/toystore/authpolicy_jwt-k8s-authnz.yaml deleted file mode 100644 index 40f82cb5..00000000 --- a/dev/kuadrant-operator/examples/toystore/authpolicy_jwt-k8s-authnz.yaml +++ /dev/null @@ -1,44 +0,0 @@ -# A Kuadrant AuthPolicy to protect an API with authentication based on OpenId Connect (OIDC) ID tokens (signed JWTs) -# and Kubernetes Service Account tokens combined, and authorization with permissions (bindings) stored in the -# Kubernetes RBAC, enforced by Authorino, Kuadrant's authorization service. -apiVersion: kuadrant.io/v1 -kind: AuthPolicy -metadata: - name: toystore-protection -spec: - targetRef: - group: gateway.networking.k8s.io - kind: HTTPRoute - name: toystore - rules: - # The list of trusted identity sources which can send requests the protected API. - authentication: - # An OIDC authentication server listed as a trusted source of identities which can send requests the protected API. - # Authorino will prefetch the JWKS using OpenId Connect Discovery, and verify ID tokens (JWTs) issued by the server - # as valid authentication tokens to consume the protected API. - # Read more about this feature at https://github.com/Kuadrant/authorino/blob/v0.11.0/docs/user-guides/oidc-jwt-authentication.md. - "keycloak-users": - jwt: - issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant - - # Authorino will verify Kubernetes Service Account tokens, using Kubernetes TokenReview API, - # as valid authentication tokens to consume the protected API. - # Read more about this feature at https://github.com/Kuadrant/authorino/blob/v0.11.0/docs/user-guides/kubernetes-tokenreview.md. 
- "k8s-service-accounts": - kubernetesTokenReview: - audiences: - - https://kubernetes.default.svc.cluster.local - - # Authorino will enforce authorization using Kubernetes SubjectAccessReview API. - # Permissions to consume the API are stored in the Kubernetes cluster as ClusterRoles and ClusterRoleBindings. - # The path and the method of the request to the protected API will be implicitly used as the rules to check for - # an existing binding in the Kubernetes RBAC system. - # Read more about this feature at https://github.com/Kuadrant/authorino/blob/v0.11.0/docs/user-guides/kubernetes-subjectaccessreview.md. - # For using Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings, thus more flexible attribute-based - # permissions to protect the API, see the spec for `resourceAttributes` in the Authorino docs at - # https://github.com/Kuadrant/authorino/blob/v0.11.0/docs/features.md#kubernetes-subjectaccessreview-authorizationkubernetes. - authorization: - "k8s-rbac": - kubernetesSubjectAccessReview: - user: - selector: auth.identity.sub diff --git a/dev/kuadrant-operator/examples/toystore/bob-api-key-secret.yaml b/dev/kuadrant-operator/examples/toystore/bob-api-key-secret.yaml deleted file mode 100644 index e4edd01a..00000000 --- a/dev/kuadrant-operator/examples/toystore/bob-api-key-secret.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - annotations: - secret.kuadrant.io/user-id: bob - creationTimestamp: null - labels: - app: toystore - authorino.kuadrant.io/managed-by: authorino - name: toystore-bob-apikey -stringData: - api_key: BOBKEYFORDEMO -type: Opaque diff --git a/dev/kuadrant-operator/examples/toystore/httproute.yaml b/dev/kuadrant-operator/examples/toystore/httproute.yaml deleted file mode 100644 index 941e2db4..00000000 --- a/dev/kuadrant-operator/examples/toystore/httproute.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: gateway.networking.k8s.io/v1 -kind: HTTPRoute -metadata: - name: toystore - 
labels: - app: toystore -spec: - parentRefs: - - name: kuadrant-ingressgateway - namespace: gateway-system - hostnames: ["*.toystore.com"] - rules: - - matches: - - path: - type: PathPrefix - value: "/toy" - method: GET - - path: - type: Exact - value: "/admin/toy" - method: POST - - path: - type: Exact - value: "/admin/toy" - method: DELETE - backendRefs: - - name: toystore - port: 80 diff --git a/dev/kuadrant-operator/examples/toystore/kuadrant.yaml b/dev/kuadrant-operator/examples/toystore/kuadrant.yaml deleted file mode 100644 index 29de2d9a..00000000 --- a/dev/kuadrant-operator/examples/toystore/kuadrant.yaml +++ /dev/null @@ -1,6 +0,0 @@ ---- -apiVersion: kuadrant.io/v1beta1 -kind: Kuadrant -metadata: - name: kuadrant-sample -spec: {} diff --git a/dev/kuadrant-operator/examples/toystore/ratelimitpolicy_gateway.yaml b/dev/kuadrant-operator/examples/toystore/ratelimitpolicy_gateway.yaml deleted file mode 100644 index 6574e611..00000000 --- a/dev/kuadrant-operator/examples/toystore/ratelimitpolicy_gateway.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: kuadrant.io/v1 -kind: RateLimitPolicy -metadata: - name: toystore-gw - namespace: gateway-system -spec: - targetRef: - group: gateway.networking.k8s.io - kind: Gateway - name: kuadrant-ingressgateway - limits: - "expensive-operation": - rates: - - limit: 2 - window: 30s - when: - - predicate: "request.method == 'POST'" - - "limit-per-ip": - rates: - - limit: 5 - window: 30s - when: - - predicate: "source.id == source.address" diff --git a/dev/kuadrant-operator/examples/toystore/ratelimitpolicy_httproute.yaml b/dev/kuadrant-operator/examples/toystore/ratelimitpolicy_httproute.yaml deleted file mode 100644 index 8fea9b04..00000000 --- a/dev/kuadrant-operator/examples/toystore/ratelimitpolicy_httproute.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: kuadrant.io/v1 -kind: RateLimitPolicy -metadata: - name: toystore-httproute -spec: - targetRef: - group: gateway.networking.k8s.io - kind: HTTPRoute - name: toystore - 
limits: - "global": - rates: - - limit: 6 - window: 30s - "get-toy": - when: - - predicate: "request.method == 'GET'" - - predicate: "request.path == '/toy'" - rates: - - limit: 5 - window: 1m - "admin-post-toy-per-user": - rates: - - limit: 2 - window: 30s - counters: - - expression: "auth.identity.username" - when: - - predicate: "request.method == 'GET'" - - predicate: "request.path == '/admin/toy'" - - predicate: "auth.identity.group == 'admin'" - "admin-delete-per-user": - rates: - - limit: 2 - window: 30s - counters: - - expression: "auth.identity.username" - when: - - predicate: "request.method == 'DELETE'" - - predicate: "request.path == '/admin/toy'" - - predicate: "auth.identity.group == 'admin'" diff --git a/dev/kuadrant-operator/examples/toystore/toystore.yaml b/dev/kuadrant-operator/examples/toystore/toystore.yaml deleted file mode 100644 index 856510c9..00000000 --- a/dev/kuadrant-operator/examples/toystore/toystore.yaml +++ /dev/null @@ -1,41 +0,0 @@ ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: toystore - labels: - app: toystore -spec: - selector: - matchLabels: - app: toystore - template: - metadata: - labels: - app: toystore - spec: - containers: - - name: toystore - image: quay.io/kuadrant/authorino-examples:talker-api - env: - - name: LOG_LEVEL - value: "debug" - - name: PORT - value: "3000" - ports: - - containerPort: 3000 - name: http - replicas: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: toystore -spec: - selector: - app: toystore - ports: - - name: http - port: 80 - protocol: TCP - targetPort: 3000 diff --git a/dev/kuadrant-operator/index.html b/dev/kuadrant-operator/index.html deleted file mode 100644 index 9bbbecf1..00000000 --- a/dev/kuadrant-operator/index.html +++ /dev/null @@ -1,1386 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Kuadrant Operator - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Kuadrant Operator

-

Code Style -Testing -codecov -License -OpenSSF Best Practices -FOSSA Status

-

Overview

-

Kuadrant leverages Gateway API and Policy Attachment to enhance gateway providers like Istio and Envoy Gateway with additional features via Policies. Those features include TLS, DNS, application authentication & authorization, and rate limiting.

-

You can find more information on the different aspects of Kuadrant at the documentation links below:

- -

Contributing

-

The Development guide describes how to build the kuadrant operator and -how to test your changes before submitting a patch or opening a PR.

-

Join us on the #kuadrant channel in the Kubernetes Slack workspace, -for live discussions about the roadmap and more.

-

Licensing

-

This software is licensed under the Apache 2.0 license.

-

See the LICENSE and NOTICE files that should have been provided along with this software for details.

-

FOSSA Status

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/RELEASE/index.html b/dev/kuadrantctl/doc/RELEASE/index.html deleted file mode 100644 index 1ac0d286..00000000 --- a/dev/kuadrantctl/doc/RELEASE/index.html +++ /dev/null @@ -1,1422 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - RELEASE - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

RELEASE

- -

Release

-

The release process follows a streamlined approach, no release branches involved. -New releases can be major, minor or patch based releases, but always incrementing digits -regarding the latest release version.

-

New Major.Minor.Patch version

-
    -
  1. Create a new minor release branch from the HEAD of main: -
    git checkout -b release-vX.Y.Z
    -
  2. -
  3. Update version (prefixed with "v"): -
    make prepare-release VERSION=vX.Y.Z
    -
  4. -
  5. Verify local changes: -
    make install
    -bin/kuadrantctl version
    -
    -The output should be the new version, for example : -
    kuadrantctl v0.3.0 (ff779a1-dirty)
    -
  6. -
  7. Commit and push: -
    git add .
    -git commit -m "prepare-release: release-vX.Y.Z"
    -git push origin release-vX.Y.Z
    -
  8. -
  9. Create git tag: -
    git tag -s -m vX.Y.Z vX.Y.Z
    -git push origin vX.Y.Z
    -
  10. -
  11. -

    In Github, create release.

    -
  12. -
  13. -

    Pick recently pushed git tag

    -
  14. -
  15. Automatically generate release notes from previous released tag
  16. -
  17. -

    Set as the latest release

    -
  18. -
  19. -

    Verify that the build Release workflow is triggered and completes for the new tag

    -
  20. -
-

Verify new release is available

-
    -
  1. Download the latest binary for your platform from the kuadrantctl Latest Releases page.
  2. -
  3. Unpack the binary.
  4. -
  5. Move it to a directory in your $PATH so that it can be executed from anywhere.
  6. -
  7. Check the version: -
    kuadrantctl version
    -
    -The output should be the new version, for example : -
    kuadrantctl v0.3.0 (eec318b2e11e7ea5add5e550ff872bde64555d8f)
    -
  8. -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/development/index.html b/dev/kuadrantctl/doc/development/index.html deleted file mode 100644 index e8286080..00000000 --- a/dev/kuadrantctl/doc/development/index.html +++ /dev/null @@ -1,1385 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Development Guide - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Development Guide

-

Technology stack required for development

-
    -
  • git
  • -
  • go version 1.21+
  • -
-

Build the CLI

-
$ git clone https://github.com/kuadrant/kuadrantctl.git
-$ cd kuadrantctl && make install
-$ bin/kuadrantctl version
-{"level":"info","ts":"2023-11-08T23:44:57+01:00","msg":"kuadrantctl version: latest"}
-
-

Quick steps to contribute

-
    -
  • Fork the project.
  • -
  • Download your fork to your PC (git clone https://github.com/your_username/kuadrantctl && cd kuadrantctl)
  • -
  • Create your feature branch (git checkout -b my-new-feature)
  • -
  • Make changes and run tests (make test)
  • -
  • Add them to staging (git add .)
  • -
  • Commit your changes (git commit -m 'Add some feature')
  • -
  • Push to the branch (git push origin my-new-feature)
  • -
  • Create new pull request
  • -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/generate-gateway-api-httproute/index.html b/dev/kuadrantctl/doc/generate-gateway-api-httproute/index.html deleted file mode 100644 index c369e21c..00000000 --- a/dev/kuadrantctl/doc/generate-gateway-api-httproute/index.html +++ /dev/null @@ -1,1402 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Generate gateway api httproute - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Generate gateway api httproute

- -

Generate Gateway API HTTPRoute object from OpenAPI 3

-

The kuadrantctl generate gatewayapi httproute command generates an Gateway API HTTPRoute -from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.

-

OpenAPI specification

-

An OpenAPI document resource can be provided to the cli by one of the following channels:

-
    -
  • Filename in the available path.
  • -
  • URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
  • -
  • Read from stdin standard input stream.
  • -
-

Usage

-
$ kuadrantctl generate gatewayapi httproute -h
-Generate Gateway API HTTPRoute from OpenAPI 3.0.X
-
-Usage:
-  kuadrantctl generate gatewayapi httproute [flags]
-
-Flags:
-  -h, --help          help for httproute
-  --oas string        Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)
-  -o Output format:   'yaml' or 'json'. (default "yaml")
-
-Global Flags:
-  -v, --verbose   verbose output
-
-
-

Under the example folder there are examples of OAS 3 that can be used to generate the resources

-
-

As an AuthPolicy and RateLimitPolicy both require a HTTPRoute to target, the user guides for generating those policies include examples of running the kuadrantctl generate gatewayapi httproute command.

-

You can find those guides here:

- - - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/generate-kuadrant-auth-policy/index.html b/dev/kuadrantctl/doc/generate-kuadrant-auth-policy/index.html deleted file mode 100644 index 0b8c7409..00000000 --- a/dev/kuadrantctl/doc/generate-kuadrant-auth-policy/index.html +++ /dev/null @@ -1,1863 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Generate kuadrant auth policy - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Generate kuadrant auth policy

- -

Generate Kuadrant AuthPolicy object from OpenAPI 3

-

The kuadrantctl generate kuadrant authpolicy command generates an Kuadrant AuthPolicy -from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.

-

OpenAPI specification

-

An OpenAPI document resource can be provided to the cli by one of the following channels:

-
    -
  • Filename in the available path.
  • -
  • URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
  • -
  • Read from stdin standard input stream.
  • -
-

OpenAPI Security Scheme Object types

- - - - - - - - - - - - - - - - - - - - - - - - - -
TypesImplemented
openIdConnectYES
apiKeyYES
httpNO
oauth2NO
-

openIdConnect Type Description

-

The following OAS example has one protected endpoint GET /dog with openIdConnect security scheme type.

-
paths:
-  /dog:
-    get:
-      operationId: "getDog"
-      security:
-
-        - securedDog: []
-      responses:
-        405:
-          description: "invalid input"
-components:
-  securitySchemes:
-    securedDog:
-      type: openIdConnect
-      openIdConnectUrl: https://example.com/.well-known/openid-configuration
-
-

Running the command

-
kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml  | yq -P
-
-

The generated authpolicy (only relevan fields shown here):

-
kind: AuthPolicy
-apiVersion: kuadrant.io/v1beta2
-metadata:
-  name: petstore
-  namespace: petstore
-  creationTimestamp: null
-spec:
-  routeSelectors:
-
-    - matches:
-        - path:
-            type: Exact
-            value: /api/v1/dog
-          method: GET
-  rules:
-    authentication:
-      getDog_securedDog:
-        credentials: {}
-        jwt:
-          issuerUrl: https://example.com/.well-known/openid-configuration
-        routeSelectors:
-          - matches:
-              - path:
-                  type: Exact
-                  value: /api/v1/dog
-                method: GET
-
-

apiKey Type Description

-

The following OAS example has one protected endpoint GET /dog with apiKey security scheme type.

-
paths:
-  /dog:
-    get:
-      operationId: "getDog"
-      security:
-
-        - securedDog: []
-      responses:
-        405:
-          description: "invalid input"
-components:
-  securitySchemes:
-    securedDog:
-      type: apiKey
-      name: dog_token
-      in: query
-
-

Running the command

-
kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml  | yq -P
-
-

The generated authpolicy (only relevan fields shown here):

-
kind: AuthPolicy
-apiVersion: kuadrant.io/v1beta2
-metadata:
-  name: petstore
-  namespace: petstore
-  creationTimestamp: null
-spec:
-  routeSelectors:
-
-    - matches:
-        - path:
-            type: Exact
-            value: /dog
-          method: GET
-  rules:
-    authentication:
-      getDog_securedDog:
-        credentials:
-          queryString:
-            name: dog_token
-          apiKey:
-            selector:
-              matchLabels:
-                kuadrant.io/apikeys-by: securedDog
-        routeSelectors:
-          - matches:
-              - path:
-                  type: Exact
-                  value: /dog
-                method: GET
-
-

In this particular example, the endpoint GET /dog will be protected. -The token needs to be in the query string of the request included in a parameter named dog_token. -Kuadrant will validate received tokens against tokens found in kubernetes secrets with label kuadrant.io/apikeys-by: ${sec scheme name}. -In this particular example the label selector will be: kuadrant.io/apikeys-by: securedDog.

-

Like the following example:

-
apiVersion: v1
-kind: Secret
-metadata:
-  name: api-key-1
-  labels:
-    authorino.kuadrant.io/managed-by: authorino
-    kuadrant.io/apikeys-by: securedDog
-stringData:
-  api_key: MYSECRETTOKENVALUE
-type: Opaque
-
-
-

Note: Kuadrant validates tokens against api keys found in secrets. The label selector format kuadrant.io/apikeys-by: ${sec scheme name} is arbitrary and designed for this CLI command.

-
-

For more information about Kuadrant auth based on api key: https://docs.kuadrant.io/latest/authorino/docs/user-guides/api-key-authentication/

-

Usage

-
Generate Kuadrant AuthPolicy from OpenAPI 3.0.X
-
-Usage:
-  kuadrantctl generate kuadrant authpolicy [flags]
-
-Flags:
-  -h, --help         help for authpolicy
-  --oas string        Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)
-  -o Output format:   'yaml' or 'json'. (default "yaml")
-
-Global Flags:
-  -v, --verbose   verbose output
-
-
-

Under the example folder there are examples of OAS 3 that can be used to generate the resources

-
-

User Guide

-

The verification steps will lead you to the process of deploying and testing the following api with -endpoints protected using different security schemes:

- - - - - - - - - - - - - - - - - - - - - - - - - -
OperationSecurity Scheme
GET /api/v1/catpublic (not auth)
POST /api/v1/catApiKey in header
GET /api/v1/dogOpenIdConnect
GET /api/v1/snakeOpenIdConnect OR ApiKey in query string
-
    -
  • [Optional] Setup SSO service supporting OIDC. For this example, we will be using keycloak.
  • -
  • Create a new realm petstore
  • -
  • Create a client petstore. In the Client Protocol field, select openid-connect.
  • -
  • Configure client settings. Access Type to public. Direct Access Grants Enabled to ON (for this example password will be used directly to generate the token).
  • -
  • Add a user to the realm
      -
    • Click the Users menu on the left side of the window. Click Add user.
    • -
    • Type the username bob, set the Email Verified switch to ON, and click Save.
    • -
    • On the Credentials tab, set the password p. Enter the password in both the fields, set the Temporary switch to OFF to avoid the password reset at the next login, and click Set Password.
    • -
    -
  • -
-

Now, let's run local cluster to test the kuadrantctl new command to generate authpolicy.

-
    -
  • Clone the repo
  • -
-
git clone https://github.com/Kuadrant/kuadrantctl.git
-cd kuadrantctl
-
-
    -
  • Setup a cluster, Istio and Gateway API CRDs and Kuadrant
  • -
-

Use our single-cluster quick start script - this will install Kuadrant in a local kind cluster: https://docs.kuadrant.io/latest/getting-started-single-cluster/

-
    -
  • Build and install CLI in bin/kuadrantctl path
  • -
-
make install
-
-
    -
  • Deploy petstore backend API
  • -
-
kubectl create namespace petstore
-kubectl apply -n petstore -f examples/petstore/petstore.yaml
-
-
    -
  • Let's create Petstore's OpenAPI spec
  • -
-
- -
cat <<EOF >petstore-openapi.yaml
----
-openapi: "3.1.0"
-info:
-  title: "Pet Store API"
-  version: "1.0.0"
-x-kuadrant:
-  route:
-    name: "petstore"
-    namespace: "petstore"
-    hostnames:
-
-      - example.com
-    parentRefs:
-      - name: istio-ingressgateway
-        namespace: istio-system
-servers:
-  - url: https://example.io/api/v1
-paths:
-  /cat:
-    x-kuadrant:
-      backendRefs:
-        - name: petstore
-          port: 80
-          namespace: petstore
-    get:  # No sec requirements
-      operationId: "getCat"
-      responses:
-        405:
-          description: "invalid input"
-    post:  # API key
-      operationId: "postCat"
-      security:
-        - cat_api_key: []
-      responses:
-        405:
-          description: "invalid input"
-  /dog:
-    x-kuadrant:
-      backendRefs:
-        - name: petstore
-          port: 80
-          namespace: petstore
-    get:  # OIDC
-      operationId: "getDog"
-      security:
-        - oidc:
-          - read:dogs
-      responses:
-        405:
-          description: "invalid input"
-  /snake:
-    x-kuadrant:
-      backendRefs:
-        - name: petstore
-          port: 80
-          namespace: petstore
-    get:  # OIDC or API key
-      operationId: "getSnake"
-      security:
-        - oidc: ["read:snakes"]
-        - snakes_api_key: []
-      responses:
-        405:
-          description: "invalid input"
-components:
-  securitySchemes:
-    cat_api_key:
-      type: apiKey
-      name: api_key
-      in: header
-    oidc:
-      type: openIdConnect
-      openIdConnectUrl: https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore
-    snakes_api_key:
-      type: apiKey
-      name: snake_token
-      in: query
-EOF
-
- -
- -
-

Replace ${KEYCLOAK_PUBLIC_DOMAIN} with your SSO instance domain

-
-
    -
  • -

    Create an API key only valid for POST /api/v1/cat endpoint -

    kubectl apply -f -<<EOF
    -apiVersion: v1
    -kind: Secret
    -metadata:
    -  name: cat-api-key-1
    -  namespace: petstore
    -  labels:
    -    authorino.kuadrant.io/managed-by: authorino
    -    kuadrant.io/apikeys-by: cat_api_key
    -stringData:
    -  api_key: I_LIKE_CATS
    -type: Opaque
    -EOF
    -

    -
    -

    Note: the label's value of kuadrant.io/apikeys-by: cat_api_key is the name of the sec scheme of the OpenAPI spec.

    -
    -
  • -
  • -

    Create an API key only valid for GET /api/v1/snake endpoint

    -
  • -
-
kubectl apply -f -<<EOF
-apiVersion: v1
-kind: Secret
-metadata:
-  name: snake-api-key-1
-  namespace: petstore
-  labels:
-    authorino.kuadrant.io/managed-by: authorino
-    kuadrant.io/apikeys-by: snakes_api_key
-stringData:
-  api_key: I_LIKE_SNAKES
-type: Opaque
-EOF
-
-
-

Note: the label's value of kuadrant.io/apikeys-by: snakes_api_key is the name of the sec scheme of the OpenAPI spec.

-
-
    -
  • Create the HTTPRoute using the CLI
  • -
-
bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -
-
-
    -
  • Create Kuadrant's Auth Policy
  • -
-
bin/kuadrantctl generate kuadrant authpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -
-
-

Now, we are ready to test OpenAPI endpoints ❗

-
    -
  • GET /api/v1/cat -> It's a public endpoint, hence should return 200 Ok
  • -
-
curl  -H "Host: example.com" -i "http://127.0.0.1:9080/api/v1/cat"
-
-
    -
  • POST /api/v1/cat -> It's a protected endpoint with apikey
  • -
-

Without any credentials, it should return 401 Unauthorized

-
curl  -H "Host: example.com" -X POST -i "http://127.0.0.1:9080/api/v1/cat"
-
-
HTTP/1.1 401 Unauthorized
-www-authenticate: Bearer realm="getDog_oidc"
-www-authenticate: Bearer realm="getSnake_oidc"
-www-authenticate: snake_token realm="getSnake_snakes_api_key"
-www-authenticate: api_key realm="postCat_cat_api_key"
-x-ext-auth-reason: {"postCat_cat_api_key":"credential not found"}
-date: Tue, 28 Nov 2023 22:28:44 GMT
-server: istio-envoy
-content-length: 0
-
-

The reason headers tell that credential not found. -Credentials satisfying postCat_cat_api_key authentication is needed.

-

According to the OpenAPI spec, it should be a header named api_key. -What if we try a wrong token? one token assigned to other endpoint, -i.e. I_LIKE_SNAKES instead of the valid one I_LIKE_CATS. It should return 401 Unauthorized.

-
curl  -H "Host: example.com" -H "api_key: I_LIKE_SNAKES" -X POST -i "http://127.0.0.1:9080/api/v1/cat"
-
-
HTTP/1.1 401 Unauthorized
-www-authenticate: Bearer realm="getDog_oidc"
-www-authenticate: Bearer realm="getSnake_oidc"
-www-authenticate: snake_token realm="getSnake_snakes_api_key"
-www-authenticate: api_key realm="postCat_cat_api_key"
-x-ext-auth-reason: {"postCat_cat_api_key":"the API Key provided is invalid"}
-date: Tue, 28 Nov 2023 22:32:55 GMT
-server: istio-envoy
-content-length: 0
-
-

The reason headers tell that the API Key provided is invalid. -Using valid token (from the secret cat-api-key-1 assigned to POST /api/v1/cats) -in the api_key header should return 200 Ok

-
curl  -H "Host: example.com" -H "api_key: I_LIKE_CATS" -X POST -i "http://127.0.0.1:9080/api/v1/cat"
-
-
    -
  • GET /api/v1/dog -> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore realm)
  • -
-

without credentials, it should return 401 Unauthorized

-
curl -H "Host: example.com" -i "http://127.0.0.1:9080/api/v1/dog"
-
-

To get the authentication token, this example is using Direct Access Grants oauth2 grant type -(also known as Client Credentials grant type). When configuring the Keycloak (OIDC provider) client -settings, we enabled Direct Access Grants to enable this procedure. -We will be authenticating as bob user with p password. -We previously created bob user in Keycloak in the petstore realm.

-
export ACCESS_TOKEN=$(curl -k -H "Content-Type: application/x-www-form-urlencoded" \
-        -d 'grant_type=password' \
-        -d 'client_id=petstore' \
-        -d 'scope=openid' \
-        -d 'username=bob' \
-        -d 'password=p' "https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore/protocol/openid-connect/token" | jq -r '.access_token')
-
-
-

Replace ${KEYCLOAK_PUBLIC_DOMAIN} with your SSO instance domain

-
-

With the access token in place, let's try to get those puppies

-
curl -H "Authorization: Bearer $ACCESS_TOKEN" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/dog -i
-
-

it should return 200 OK

-
    -
  • GET /api/v1/snake -> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore realm) OR with apiKey
  • -
-

This example is to show that multiple security requirements (with OR semantics) can be specified -for an OpenAPI operation.

-

Without credentials, it should return 401 Unauthorized

-
curl -H "Host: example.com" -i "http://127.0.0.1:9080/api/v1/snake"
-
-

With the access token in place, it should return 200 OK (unless the token has expired).

-
curl -H "Authorization: Bearer $ACCESS_TOKEN" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/snake -i
-
-

With apiKey it should also work. According to the OpenAPI spec security scheme, -it should be a query string named snake_token and the token needs to be valid token -(from the secret snake-api-key-1 assigned to GET /api/v1/snake)

-
curl -H 'Host: example.com' -i "http://127.0.0.1:9080/api/v1/snake?snake_token=I_LIKE_SNAKES"
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/generate-kuadrant-rate-limit-policy/index.html b/dev/kuadrantctl/doc/generate-kuadrant-rate-limit-policy/index.html deleted file mode 100644 index 354cd939..00000000 --- a/dev/kuadrantctl/doc/generate-kuadrant-rate-limit-policy/index.html +++ /dev/null @@ -1,1570 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Generate kuadrant rate limit policy - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Generate kuadrant rate limit policy

- -

Generate Kuadrant RateLimitPolicy object from OpenAPI 3

-

The kuadrantctl generate kuadrant ratelimitpolicy command generates a Kuadrant RateLimitPolicy -from your OpenAPI Specification (OAS) 3.x document powered with Kuadrant extensions.

-

OpenAPI specification

-

An OpenAPI document resource can be provided to the Kuadrant CLI in one of the following ways:

-
    -
  • Filename in the available path.
  • -
  • URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
  • -
  • Read from stdin standard input stream.
  • -
-

Usage

-
Generate Kuadrant RateLimitPolicy from OpenAPI 3.0.x
-
-Usage:
-  kuadrantctl generate kuadrant ratelimitpolicy [flags]
-
-Flags:
-  -h, --help         help for ratelimitpolicy
-  --oas string        Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)
-  -o Output format:   'yaml' or 'json'. (default "yaml")
-
-Global Flags:
-  -v, --verbose   verbose output
-
-
-

Note: The kuadrantctl/examples directory in GitHub includes sample OAS 3 files that you can use to generate the resources.

-
-

Procedure

-
    -
  1. -

    Clone the Git repository as follows: -

    git clone https://github.com/Kuadrant/kuadrantctl.git
    -cd kuadrantctl
    - ```
    -2. Set up a cluster, Istio and Gateway API CRDs, and Kuadrant as follows: 
    -
    -
    -* Use the single-cluster quick start script to install Kuadrant in a local `kind` cluster: https://docs.kuadrant.io/latest/getting-started-single-cluster/.
    -
    -
    -3. Build and install the CLI in `bin/kuadrantctl` path as follows:
    -```bash
    -make install
    -

    -
  2. -
  3. -

    Deploy the Petstore backend API as follows: -

    kubectl create namespace petstore
    -kubectl apply -n petstore -f examples/petstore/petstore.yaml
    -

    -
  4. -
  5. -

    Create the Petstore OpenAPI definition as follows:

    -
  6. -
-
- -
cat <<EOF >petstore-openapi.yaml
----
-openapi: "3.0.3"
-info:
-  title: "Pet Store API"
-  version: "1.0.0"
-x-kuadrant:  ## Root-level Kuadrant extension
-  route:
-    name: "petstore"
-    namespace: "petstore"
-    hostnames:
-
-      - example.com
-    parentRefs:
-      - name: istio-ingressgateway
-        namespace: istio-system
-servers:
-  - url: https://example.io/v1
-paths:
-  /cat:
-    x-kuadrant:  ## Path-level Kuadrant extension
-      backendRefs:
-        - name: petstore
-          port: 80
-          namespace: petstore
-      rate_limit:
-        rates:
-          - limit: 1
-            duration: 10
-            unit: second
-        counters:
-          - request.headers.x-forwarded-for
-    get:  # Added to the route and rate limited
-      operationId: "getCat"
-      responses:
-        405:
-          description: "invalid input"
-    post:  # NOT added to the route
-      x-kuadrant: 
-        disable: true
-      operationId: "postCat"
-      responses:
-        405:
-          description: "invalid input"
-  /dog:
-    get:  # Added to the route and rate limited
-      x-kuadrant:  ## Operation-level Kuadrant extension
-        backendRefs:
-          - name: petstore
-            port: 80
-            namespace: petstore
-        rate_limit:
-          rates:
-            - limit: 3
-              duration: 10
-              unit: second
-          counters:
-            - request.headers.x-forwarded-for
-      operationId: "getDog"
-      responses:
-        405:
-          description: "invalid input"
-    post:  # Added to the route and NOT rate limited
-      x-kuadrant:  ## Operation-level Kuadrant extension
-        backendRefs:
-          - name: petstore
-            port: 80
-            namespace: petstore
-      operationId: "postDog"
-      responses:
-        405:
-          description: "invalid input"
-EOF
-
-
- -
-

Note: The servers base path is not included. This is work-in-progress in follow-up PRs.

-
- - - - - - - - - - - - - - - - - - - - - - - - - -
OperationApplied configuration
GET /catShould return 200 OK and be rate limited (1 req / 10 seconds).
POST /catNot added to the HTTPRoute. Should return 404 Not Found.
GET /dogShould return 200 OK and be rate limited (3 req / 10 seconds).
POST /dogShould return 200 OK and NOT rate limited.
-
    -
  1. -

    Create the HTTPRoute by using the CLI as follows: -

    bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -
    -

    -
  2. -
  3. -

    Create the rate limit policy as follows: -

    bin/kuadrantctl generate kuadrant ratelimitpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -
    -

    -
  4. -
  5. -

    Test the OpenAPI endpoints as follows:

    -
  6. -
  7. -

    GET /cat - Should return 200 OK and be rate limited (1 req / 10 seconds). -

    curl --resolve example.com:9080:127.0.0.1 -v "http://example.com:9080/cat"
    -

    -
  8. -
  9. POST /cat - Not added to the HTTPRoute. Should return 404 Not Found. -
    curl --resolve example.com:9080:127.0.0.1 -v -X POST "http://example.com:9080/cat"
    -
  10. -
  11. GET /dog - Should return 200 OK and be rate limited (3 req / 10 seconds).
  12. -
-
curl --resolve example.com:9080:127.0.0.1 -v "http://example.com:9080/dog"
-
-
    -
  • POST /dog - Should return 200 OK and NOT rate limited.
  • -
-
curl --resolve example.com:9080:127.0.0.1 -v -X POST "http://example.com:9080/dog"
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/images/apicurio-security-scheme-apikey.png b/dev/kuadrantctl/doc/images/apicurio-security-scheme-apikey.png deleted file mode 100644 index 0da63fc05154c49b077c510f1c40ddeff30ae168..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9423 zcmcI~cQl-B_pVeeK@frGi!A?qxABPL&C#5J39+77=95E92QF_<9C$) zC{k5T%~BvPJe-_mCYaf8V{;SfIKMmLZ&vv4ITlzTuydXYp`Ij;UoD+DSuZr;_Vx8; zU`V)jTj5unV5UZj^mfR}$%(MA@bW-jg1DOv3bnm9R#|yt`e=V^cYT77nq+!v>f-rx zCcH6iZ(hIV@xzcT! zMt5)2ikO9tj;_1AduV6~zTtbYjV&(Tf@^n|mL!m|25n$qkeZs>(b1v!;pUU#)Ain< z^z`&F22mUicd|Dfo}Ztuld8lPFKinf7N%xr#~t3hT=);1*n+*NqiI;^p+R-S@Gvi{ zdDKC{X1=As?akqY;A6~ zwzXkmReEp81lOmYn&|13y3DqI`zBN&$4wL3I0a`V@!-Mp8N*fVkX@AWod_it*R!iL zMTVJ?(ca#E>`jJMd%W9HA7*K3$)fp#@6q38)c#^`CKo5?l`B{3>gtA5qGDobX=nyU zM^%0AMJe0b*a!#+?D?D|K5)&d8)v9Ih~cJDNqa5FTJAjE)DS`?+{$NM*7e2;PD#Y> z_rueBx{uGU==gZ?XI)>&EbYasD7rU@&>F-~JuL zR869bm+OPVoKPqlqAT(7^rR$3>tmI-DX+`P`_fWUu1!si=Y7CG{w3u)r|Rcd^Wj5B zh1;@182z1?wiIdK_)nh%TxME!geBg`az?#teQ}e_dc5kzxpU{{=H@IdEg=KQnIspS zJBo{n-i3y0b4IzjxeXSWO%z$GsHwI8ctbdtYtUQc^6c5dXt^uK|1LXwzENpMOUuGk zV<_qpSuhn7Q_YL5x!Bm)p<-)>8#gErPX!E6C?S@ou#AF&!fL`;rH7-dYjH^lKIke# zVp7ub@-iG&WMt%p^XE-}{I1N`ggAcwoPN!fl$5l%xHu^($<@W>o8x1Q&f0IREE>7L zx5sUS`!zaRm7To=JA%CA<>h5`|I>q3dGX@KZAec^&-=PaZSAdoJu_IY^XE^@moE=7 zIyQ-$gS98ZA|g9`f2Sdr8EZx$@?8vMjKdhs7GjpXJf8OUu*OCWa>n6D%(G5n^Bs(} zzK8VqS&4O3@8!G`Mx3^OtoKyIB;@>Ly`p-8h>wkIchwWU4p6_+6c@4BLt0+r zCl(e3zU1Cqb2KzGln5UmA9l5Pxv1FqcrpeN)kB(3pFU-k|H3a1)Ok%f(wweH7{R}|)sXV*j{h2P1(_1R*DUOypGeSCe-(!lRS z90e;i$??$dOXu zEiRy{aD{n!t9=;ek>TNSzY&k7_vNcx3(H$FM;jJ2q4!!|5?u z7r3fdhRhPZD=J!AEMnIFTSCZd`&pc-`|7Z8g&Z1rhn;1ksBLA+dhtrdS~cbB~>UF2>9clb`R7LL~}VDqG{BrKl5ygoYZGI+!>+J0Fr% zOYL;D%)0pd`y+_(xUQsoJUon|4v9@dJFeKFmG9plB3d%k63Cb&e|L3txg7(BQXz~@ zOx#eY1q5%5ghkU8k%Te7^7Hl!U8&qOTwGi#tP!Z3U@_Da*&z))H~ zzpZvr*sHFY8Du;4wQE0SUC3{WeK9R}oT!=b-5nb)v1OL>67oMeI(Ol+&vv(BB)gi8 
ze|=Ju;%_tmV>eArO%)YVx8TQ}Ygd*LFJA1IIZuC9zQY(|)sZ+fnM^)xfz?m(J6Hrz z3TKp1X#WGBGfhSf#hj-aaNYBU^=av{@vo4t>4z{{8#hygU_U<)Y%^%98t+A%>*3vOhT`(byBcI9B!)PS-ny!~y1J?g(iIw_ zq@-k@jQhH#zW(nJs=FB)DIL>)kw|1+eLVn!Mp(sele&FchPuTc02E0k$D_T!%IM9_ z*)|<(Ub7lIbMvXy)v=**R}-O}%*;~DwrFO*y&n)*_Q)j37n{es6PYxBaC$~YDnzM8 zZXQ}jby_?SsP8HA+S<2S1LrBZ!{;g&E?nS^$c8)y`D+ zz4yxHrpz;X@_4#=hVzJy*U?X(|gJG+*Fm;eA(LKltZF*?{?U!P1Z zuO}+WPAxu0Ra)8EsS;rd3f6y=pVeZ;!qSp58wuuHR41)dx$&#-FDZJ4hkFJFnkJnv z60TVktWh{hL9Y8APnq!=yo_FfsT$EE6&3F_dNl-5Xrqg} zJA1%rwRdT#jkvgYU(1LFrJjzC=5_H(x90$_KGLVqmP{0dR<+2@Z=EI^^nKFgIGPIO zv-%bL&xtQpRJ6sq)cE)~J3D(glN2W(U#1>+m1i*-85sn?!^MR)z_{A0#P`xt+SPL7 z@i$Fq3xmwVYW{89hGRv79AZO4(fNn zO#y|O#ZhnbX{f2?&e~2ZC<;ExkaXlCbbFlrh(bH9yRYLAkW!%aHnz6AYhyXH-_49c z1IWoW#0grH01g-$rX!yl8q(w6@`;w79(O!{urn^ZTIw{@axG4EU>Y;Jy}d1LJ78St zo>x|;pQk!4sjnN#^Ti8) zNxvfr6R{_V@3{v441!H5ypJ?$?@?ZdMlK~O#4I!rm@LEex^v#c8LD*`D5Cey%6@pL zb2*n@eqXF9+J)18Z)37=%H+a@H})eqU!cFby1H<56E(gPCc*7ir>lO1R8&9%F)ei%`x$u`v{4rNe6<=zXS^KhIZ1+k78qCG zNu}H})X z9f=`#0`?9GH}}u4GOXuiehCZ?Zfj;4~N|$RRsH=uLNh;*hYb{ z0u24YEaL}B8ll7%`4gIioxQ!{`y2lZJ}Dkb5Oq{nQWEo6GgepsAjirtAV4qX^zXm_ zeo0B0a6NqRVsi%61kiQ+@v8klKi=^9y1veX;NSpC`L6m+UXGUM&m$5g_irD+ z{FWa^H*JPw_1fk*Zi$ep4|;L9_rA2`n3M&07>K{U5l1~HnG+K<^2LiVrUx#@rCyCo z+zSL%UhwYH@Pf_E{#8zsh}BLKV&b1ae-4$qW`>7rtmCi3vO=(`y1M#G#Rtu2RW|*n z%M~A#(aNflNuOLITG8U-lG0|kB80R=Gm3}oWNKd$Q=PAnU^uj{cNgpLV*z*!l)0pAQsib~>ktri&p^aYa)r)36Sob|AW_V~! 
z+tRY_tV{W;SCb=-hrA{gep3zir1n}Ei_15xhAKVQ0jeO~Xu0(_Mq8E90zyJU5)y~s zUtQp{l^}xWcO@J1=3p>OD;z0&_k~u*rIk3NvN0Ge8Xeb3OLQ@aujmUkKkYa-N#GRl ze>;&wp!Gg$8tx!#%+0?hB{7y<0>20BA%5Dk+YKVy|4-LNJggi{2+u|?)4%WZs-~_N zcs4FG91aUhUOJ02ED%a_cK`3$I{z)Z=>PWI6C?9AI4l}&{VunpBngiWux0crSt68P zzkUt6%B+wPKdGaq=k4unR_zrfosGl{Xae8d|5G zwirHZOUvh~R$NC{_c}8(GbLr6Rqevac%u_YgB#4uwHD1}goMFN zrJYvAvIs;+iVPo3sP|}#N@hWUqm4~`TpU;%I~9kUl!f}-9v&Wmnf9D|j~{bJDdQ2QMfFs|~%1_u+m zaR>>az^*aJ?$rg7f&&583JVL%j~_q!y5X$Gp?N~5H8fR1Bjptoq(Qks`v8g?@DT`| zl=sflr%!3S8&xd37+FGsOV zuH9mtMvn)OPrsNub`+1 z$s4o0us{@ufc^rWBH}F)!wv2r7gwY*+L(6`9Hz0cY7p8Wpwbk=C_@`rBb0b!!s&(I zwauDVy0?#wjn&n?ruY;eZ)0hBCrTOog&CLf`BTZ_2;?B%~ z^_7*xgajj9U166QrR*GmIBwWScD6F?V|f|)mxP#@W;Hrr+GlU0sHh0E5Dq6IopL&0 z_Nc<+h@Q)=Pp9oiS6MMId~n?Eof?yL&lzpsxqmv?m)^hJU8 z$ji$E4-y*u6=LEcaGlJ}%{#ryKreXjuB{Fg2e+017?RTR3i0!YK*ySwIW;ggu0ky$ zqm+fs8;ERemw_ww+{J-=aQCi8f=Hfk!s6m0migO{ zlcgBw=pH<&3jjk7R)O?#fZqjt1WrFZJj@{K&?US+QEO&uD($wUgLwPl!-vYsLxz$A zZH&&+;vydY4-P#eBcootv9S^CGb<}Ah$gHDs+osJ?bt+j7ofbgwH46D%iDWoXh`bC zrkrnbS$g^m5EuxBsj2CO%U3vfc-q_A(#W8z3kwNdfzH(M_8K_YltdQ+Hu_S*q=-;@ z3iUKO$?x>%;nIBYr^v|LG@*o7Zrp#`LQzpuQB@_0MlR0I3cIi9L(S&q=5B7fAe2r| zj*}7-f$pCuY%c|#4-@R|BdHH3gI3Kho3$A^EP{PAN~ftd^~E$xyD*eN@uKGh0fd{USL zXErzQne4fQM#iP2I6`0-7C5L$z#xMX!EbN(P3Rf|nz^}^iO<$O+&meL_yMvTyXc-Qhu{ zrJ^uGC@lQqA3(%H8S)gEFdRP^?Tbz?;f%qA0jmUDM|c7N-la>I0w_ch)Y{uz#BP!W z_xg3KJbLs9@*83XFn8tZ)idYL*jQOvx$jMGd3iZh&W8_V!S&846qIz*F$Q4w`;`LM z%uBMe^?636(J?WOqh(maOzm@DQ&V5@Lfg>NAb{+H#7%0SaeG@o31TEs@`VeOp}V^~ z>V=or0T?R_O_+3f`R^F*Ol7uDGc!+@G(pT~X{EskMA#63%ht}$&g`TpDDv~??Vop3 zfGMtwR~uDJ0#4v?1D&1Wyw2e7w$1i6Jaux~fC9_9R0c=8)St7o)dfnCG9*BK!;uV} z!*b)~$jCIU@xDH`n701Ang-m_fr0YDFY;l#jpmrU>J}CjoNAN^I9(XN97y?qp{$Jl zMDhW43q^Yfc->cP7VvR+*pMa^J$}v&w44cF8x2Vyijar}0u5A~B=BpfHV2R%dXHiQ zeSK(K_*Z}cn3THdzaX?+jliGT+wzjKb-jng2nh%vIjDJ!aH?$wXJ`e}9Ay<1VTb0_ zXQQ5Q*dWETv!Ox_-7(+3ZEsB0!{`Fgd)vh*PL(}E39t(|zWN|Ad$42K0-zTG?(INZ z|KGzOFc84(GC7O}93^I1V%7zK;x?=C0TT)G!3byfcYFIL*kHP*dU}dP`JiH_a=5)`6hleNBxYG&vU+7j7P&GfU5<+*ti& 
zON~8X!&5R4k&49hQL(YeaYk)=O5zcaW6he`m|${d8{_zQL11kM1qHE2oB`7J@87$- zBi`ZqkS1ZI-C4WEq{eIhNB-$S?2$i(T1T`wsOiIs^|^UtRKi|wX82M8$?zJn4J}7v zQ2U#+kilA7T01*C!-xvh zW{RI7Y+OU;xUbaF>})vI+uxrgZ2K6bkC&ymgL;az6f5i3s;Z*{{4y{gXvC{Vb6fj2 z$rP)D`8>pYq*EvaKsyqWGs!6^fN=c0BsW0HS#iH&7e$xd5R{-uZ|Afu?C6)W?zsW$KbsIPswwhHwL|2 zUcMjd=s&;!@)2-Pl|Aio0TRCFDajLzvu&lU@<%0qOT5&`~K%BN48&?={o=rcAt9`|DYymLLMh*xjkPHxsD=o@?#VW*d0Tme$trO;K_2ct2wp z8-i8x4ko)TEnjf=7P`7pWucQ#jMPI;obHS`!o&$$3lR&LVArl)gNYL8&v{r!Ku}N{ ziOfo(;ec5(WFI{DI!WMdh4-3{ODD&N^a&zWTEMtroa2@86sr%zQUKS$z(8m#csxFw zS;pb{bHHai{|YRYzl78gtXMf#2n{41D#N;X2s#F&5`cM0Ny*i#SEHh$w$(}Z_xJk@ z09o$bxdT7@xXZ9uXPD)|Ucgk&G8(PGtCrw!(WK>12na5^o&7BU4vKl8zaLO;c5V)O zNsm+}V!t<218`#=@e0`fswdukxw^)Pc%rLy>;&=%}%c)~koNoZQ^d zj+P7pPnG=q{GL2X)#2>#?p|DR|3)_S`!|RLplV>Cfk!(vg~*d+2>`Z!_wL;YrO&`K z932a=7X#a^VClKp*)uB09=W)@JTfxk@i#5&Rr9o##QkRzx_}X)%qqvSJu)R-U79e? znr(}zjm}YF`vm0;SRiL{oG*?>K7c@Vrz!H8RBV@Dt!bT1N2BHCxPK#yns*(7WG_wwlTNpvrJ5wn=nmwM||aIL!pt)(bvX`5$VDwFLk zi-+34eg+c~*m40Ahs0Lv$$2oI(qm#`ykcN9tdS&M;ykVJ)EJ(&7iq5ZtG_ANptz5g zp{oKvDUb(ZtENU;Lc$xA*^?(v`ld}Q|12*b?$(|rc_}|8=iPOIiMcYm2TFRp%-I6I zwG=GJ+6`l^2ktAs2m3|OV3Ky*jMfznw4{npk>UCC=L{lt@Fxtw@5zXXyXQXN)6S^M z&8hv8rWk$#-Yw+$>0YzU=;&x4xm(%YGu8P$g~)Ya?(ENJ2)YV4&m11GH2#lw@;`Kc zJEF1*#K_c!?mv%z**U*!bI2W~+@JFwkA}b#U-%6A;(uY~|LHgRe|_G<>4ndNOxWx9 S_?FM!^O3x|Tt4F8*Z&KFz3J%y diff --git a/dev/kuadrantctl/doc/images/apicurio-vendor-extension-backend-rate-limit.png b/dev/kuadrantctl/doc/images/apicurio-vendor-extension-backend-rate-limit.png deleted file mode 100644 index 8d18f023f2ea1fed483092f266c9feda6f6ac6bc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 40320 zcmeEu^s2r_!B6cPT2}-3=l!G($^Ecc%g(UDDkRN_Y3rH8gj8-sc;>_m}$z zTt4S~&SC9Zd+mMJUO8dPic%P;q^K}3Fc>n@Z&hJn;P#<6`U@l|L|TDM2>O6^R+V}K zQ$F?zS^{Hgp)F&npa8=ReSZN18)^-M@Vg51CWYQGFbMG>Fv!p+F7#H;g@u8KK4E{C z=feF}6K+2j{_l5mXgSPlHAxv6=u^$i$-=_k`2)~J1#^8H+Hl5NUE4)lL0-rVXvc11 z4m7o32iZCNZUQ3$5`wg=1bzPv22fM}r;3Y>D7ChNGNmNY 
z$%2xPor9f&S`3wvl2XLU+)_yO?fd^WhyEu@{lUe>K?neFcXwxZ=Vk{wSphf&1qA^d zTmUXEHfRktXAgT96A+udGtK{W@>f4^Eu77qtQ}mef%cTY`!z8Iy1Ix`Q~w_5@4x@S z(*k7u&q(&p|D6_ef`H#808Vxez~6mCn~MB~3MpHIENpe&TH8U%106$*my1W_KlT5w zl7B}0S4-`GTJrPo{=4PBO8$3C4QC4{NuV8cNEfkxR_4Er|6Tash9ZF9GyfMQ{)f!} zfkNplhAIO1d(Fg9!;2NZ!oWzt$h>{64uU<*Le9j0H`y1F={)u#J}*8HmzIH!hWSM} z?sft?F70>9a9W($QcCL2@voHw;HcpOUWA{=zjhj12Ny;g?j|p{E^p$xE1d~j)=j!R zH(S*;eQV;M364lPMhQlc{`9Ylasq``DOnwiEj(e??w(2j)Y0NG#0q={}(2o z5U6vY(18>9nSUer2_XO{H=Z0FS^VFa#Y$}9L7}%k9i)GPIpEYM0@v4&|AjCt?q_c( zl%^d^8~zXQi}#;oJvm-I{2O5jxHu&!bPED&^C!6Tg|ZC4fcZd- zZk@knj;K}|zxJp#5FU)B3RIv6&y-_mYW*mA8-{x>U;8nhA84p01i{nugPe-_+zkNq z{Ni{^W~Iw?YqttC%k*Wgj}{q#SfsSp6Zy$IY`OzMF$@hGOG?!iTxxKX-0?jNww>^TX0fTXNcU5h_tQ$rl?vKx6D340S&KmWS-!W-lM9}NH)a7JG;l9W@ zg}wZHS;&21f_Tz!3Sjuh{v_S&00dIVBV=`Wib*YB4|1H|pXAT*_ZIz?-Ejq;HJ`8c zcz+^NVHZrUQ{+T87*8i9VJ8kcrMSJCrP#iyqH$X95|oLfPBI_LoZcfOpp%MTf=vqU zF|m}1!DEQt8corPekI7N=ldk6{f^zB$(*!ZJ!${p{eH94Rd_Z$9KPT^WX>$6@tjP| zMS5~1>pqw{qWpz3+%|4_W9_HaHXkW*qh>dS;h}Wih*vkOuxVV|Y99N&L>B$gWMhZb zpkLJcQ&oY?ek5F$vBerCq}zbDQk@#zY~U|_x8sM=q!k^l9{`yN+G>}5Tk`2 zf~%=o>w65DbeC>1_{TG+X*#;_R&2$9K1|EQO<-{WqJ z3T;rIoVGyv$$tDtUeEHteOnvF%THatw%-lvq5;Zb}o^Np-5tpWu;(GUsayl702w zAI?=37&Oo3+syKt&y1v%*Q4@fOZ~-){rBD(Uwot3+8Q)l_}>1!VupVZS$z*Ny1K{` zc2`(()Zy?8;Z*J8UV*79FnrwcynJi2ys&*foQ$EG|ADDUWwX(^X&btzQK4P@q!bKp z`g!~_7wSR%l^iwsd6x`wB1~a;3{VDjCu3|-d%os{@VO+uc#pYjDMDGUeF@V*vt}w_V$agvJf+9UBJ<&~N&y%1rtZ zTULd`)KOyyh6dB|WMRGcS???EumSobLz`U|z3-bHI6mB#BgC$dQ+4bW3FGtg-IZ3M zUjgxC@f}L9P&xA7N2PbhclQ?5IlB96TP)I_)&tjd5z=gKG6kLcEFqpt7yaZ>EGpIc zEDAK-NZ%B4z5wN0MorO{bFhz=YA2Io{>82mBo=;K?B=u{&+FS_CiI&FRW#W_nVxsTU}(GYx)ygQb~myNrKAhnnNLP}(U zsWYrOB_9Ou0G64}!Bs^7ClL;<&m{SrvE)e{5bZoe>BM7h*=xk-bd2l`S|lOnp?5xOe+ zswcP%nI5yehbr^(czU=OXEvaAl}Y*MV(WFfqQp0ajDtw0EVFNOKXlX{(PuNF`{)K{ zMw14P5RTcfFLBD(*=O4Qo}V5&zdjr-R&NjOZy~bFDP-`>a5|rD_~CUJ2d866MI$NX za#;-Rg}-Tb+8NouJKRT@xqKz)bb!#SCer;6Bf;|TzrasaFPiueSZy`Vb|aS~mf*c#WSj3Cp)^&;WsKF&BOl*bA2`2u2idz~>KkVnu%LpE{G 
zLdCq#pp>0hA88X(8thUMZuZ+6q|X#ZjacE+ojD$VF_#<)RuVmfYFrV8BKl$%)XojB1D8J`Q)s$ zB1CAKScls5<0*;YutpVPL63nw`9Pt$J1P;K^R2PHPh(ZN6Nd0o=*)#30s0k&J{oV^ zbsCf(AF?6@?Q|O`u8ttd!(TCpuvzb}EH&EQj+!i^h{kJJ;tevx>PZyO>VazCO3~?TpNi z_TkgfDjc||R!^z<@;=?1b;om>5!RT599{=w~-O#CV+6uY4 zUxtK$H$)ML*mTtiTpDa#JT@_;hH@vnf+M~MAbN;wn8H}b*-E@@n<)o*2P z4@x`^7>uS4K^roIt~;HA-ixh+&`)40<)uB}DI$fqeQEV|@`~2YPZLR>yL=h|y-ez< z3;6om-EBKC5vT0Tkn7s{n*c2~N=gtrLe4U=eo zRN7EleY1aT=hoY4&pS7YWM;GW>FIpC1)0e8l?X_JfjYaKKsEK~0(y{!$PQu^dG{s! zCAq%^!0nD_aoXI5f6N=p7PW|+EYZh!*v)BuZ?ejwQ@z9E`R=1k8grS4*-5*eor7~p z$=n))#c;C5_F1d@YMj=ZaWul(I_K{O5z$NCpR(TGrj3e(`%3XSS#nEwjf8d$NnxA+k}R_ zMPMr-GTL*^xY+JTK%LnoEojoDY{u`Q>fWDLc%&`^+m>Ws25w*P@QK@YPIw4C0ZvKm z;1t|H#{MO4vQW{cE|Ezsh+g>OZylkB1I~I>k-abqC`>CE!fFXVAG> zk`BFm3OAA!Lahn+JQ8wkVcxs`>IT8K<$Te(@!B1&L;-xlsa3wz^b@!I#oQZC+D6~U zrYiXuH921VdZ)g6l7J&vleA#onWl}o>Yb6aymHpL<*l=Riu%sZ=*kDvS%RC7dMVk) zC=U;x>@e5i;Ka|=i&SNZYObgdgbNy)LvD2PR|gmBPjao-n>O()T_KP(y|+bKkg?tH6Y+ zg_!3mXguPRfA5VRR=6}S%Si*%N`NO+Rfy8+POxk>i8aBM@_sL{QI&!In;A7V3!nvj zjc$*nRmv7=qQqr?gKqhpHgw-xNjR(z(1`p;%5q%8&=Z#F}-l$&P5a%BF}RlhR$#}_cC_vVUc z9DUmzx?l^u><_SHNRdygArYk;pd&u+_#Eo%fB7UUA=}g*{1x|xS3!GXJ>u{ojylmK ziSh<(a~u_4;}D3^`9>NN{B3ZwOP2BH-0o6>1XYmh%Ww~SGZcXS_sS9y}%4!m+GTfX$8*UA=3q)Z2~;dxDRL%@{Tf&ACtCW0tdj_RW^7>Cr1> z=2NUw9NU^TyHym$M^oGuS>J3A#7Rz)WUg%c=HFxSnRLNdUAY1BbS^Bb%?C}dD(Ztb zk>fb5=dBa%8dfCM05K?m%QcVf=T9a0^i14Vqg=_CGH^r;tkuCCqeZxcFnHcKH=f^(Y>k~@5h^9G!N!vlYexs#Ez?%ukEMi zrZ(=bO8S0+h9CB}2-q<114eMiKI*Dzx2 z2%wiUq1zn=l&my5FOJBndFi8>R0JCF9sO);y?r-9!vz`k==6;p`gZ1WIA0UmBw@l? zY#?#KMD{N!0NI~Tt}QW($~Nrt5%DU>O!);yo0ZGwsPjjNoO);IqHQ`4$;d*Z-zYt^ z)l^2_$7#>L8W=;AXA;3yf^-ob)u`~vdSH8fD_uCs`l(F_mo-}t=aehK@N-}bSrk|66dc&dVt z41ZZ`KrSz60?Pa#n?*FSMs)W2Kmf1+?k@)C$CSmY;Uy$R}xAu_Tjfj`}! 
zWS~u&AF49;H-&22YJ*8Hfp?OcKUF{h2xnKMoGbC=_%}vBXezBI>0RlV4Vqj&+d)kj zIk$+F-A!#U39zVt89yXOC_mL-Iyj%vI>>}X*uRaa%~z9&`EcGs{#T9voK+wy<0Ka$ z6ED~v`Tc^e^V_^c=wX=hhass2 z`^C5X9KQA`l=Zk*0+=9*Q^Ag1m>hU-%>TZuKfi)8Q|eka5K%E9{sXJGaInY6xF|n8 zWd17C3=1v&dx?W-u*d2nKl35JTbL<9FLO~QE^pH zls~!>X+kjV4JRm1!os6lPB1-YV%68{>iQFjOcd?yr6TY`Wo65yB+LF7>VmTu`fzwP z>mLJswuSrYT&8LtGgetyyei4GuSwGXYtchnBXRrhQ~E!6n&Vc=3P#`53^iVelpYX$ zq+2#%>fePPG#@P-k0Y^4hN@rv*)clYF{+SQfAr%jIjc$8_fT0Gcjh%s!O(y{e6Z6LkVY8t;)Ogg&J-h18 zcNbSZ&+oyr{?eT+3-E=iPcU-EPJf^RazT-Ju5IGbHM!cX{Giey+jOy^C?}M3S-FRq z{Q6(p`t&vwUo|D*ex<=&8kL|enrYYa ztIG;8n;`Bga<^mb#{IGNzjDKtz&C^YOn&VsGwok@t*cP!@%^v4B%uTqvOg%x3gv!r zx&|;ynlJyY*8FETv7%^`%@vh~YTRO#<%ep#)5iQ`FT_e<`*WyGbekb{DpLQ&Yku2; z{lCjnid?Ni%kr2-W+MK;i<}qwRa4ZGoB8~E(|i^6C(g^?bnQ7>tf$GBtRlnmTVi^n z9C|>#&P#ZRMH*DB3Tuf9u0p5hFE_%fB$QYDizL{F2&R$oPwjr!GJ%D|Aw>Pg+hbXr z8e*h;0@}plZ(qU6$3HDSHfIX@$ky4-vwFE5C7MZL4ntT{tm(RJSX*Y>W4!OTiPl$s zF4o)cYl}JUwiF@#R zNPT!(bepeo9flt~CONJn7ucvtLfai5EI(S@+PJ+)-ue`7{It?6wKJYw z0O^BTECZrGEOCFncUvKH+L-%g?8pyXNjz$|jq}JEjm=v0B@k)P+F%2m3!Jn%Ta7hj zS7h*)Ium}`eTu9_=HjFr!u!lA>zQ~G2E+V0VNHKQ5*dJ9^!<%T}FhE!3($Xq=?KS`O0AaY&5nT&ek_&jj7<@3 zvvXb`S8aK#YOR|Wu1zyIW~XAGr6-L+5Np(JQs5&%G-wc zry`PkWXLn#*l2j>#-q+USc%l?NVMSDVp6HtMQHL&l;e4G5(U~ms2`izB8|C-GIw2zjS`|UtH{t z4N+YLpT8}dz~#5s4f#H-MF;!j7(~U$>tF8W>jv5<5}0fV3%X2ib4*(Xs^urJHf@h@l+?k ztRa?^l4Ipgmax}Mn*8|k>klpFH)tmW0MA>_(Ex>umsxa!v^=I-bL-o z{iWd_>t96gm;oXiZ;A`+ZJI*8UMeZhHhg}il)bQRcSu#{<+XI-74D98(bu6f(;dwK zu@M^XYm9=pJcF9+T6ax&jYRyiAB@^z&zXX)eU^2MJ zj5P#eC2$es@a?xxHITJGl#b7S$?F@GJ6agdg-Gr1NO&)kRdH&fQ~q`hXDw!W~uf>krV~Sa>FsQf61WNrdS-UOkg6c)VXnC<#A^KheHpePV>iL zk0?L&q!$@`fmHAX>bK#lMY~qhVUb;SQapvXE)L>W~+;EbL**(=)uisCfC*4**-jBC>cJq4`MX6W3bh%KtF%B{x zN*~n+Cg}R35wcm5!vUm&hcqn0suE3;GZ-CB+43wOtqMHAb>-mROFZqzYVl;b+fRu2 z(dL6{-p22TFFd{>dp;fTDx690H zBiGCh9D==gcUILHN=at)kt$}Fg%Uo*kJ3m!=WB(F35L|7%IpT6Tn3jKlN95g<7V%q zH>fw_@r_-JvToU`qBe=XDXHY{K6r2di=lxz}NAe|-~p2WyxC)`RU zI{Fer=V@Lz&02m|D*37}0F0c@Z6qAND4t^L)2;Qp 
zj5j_Hoetd@K(O2v1JJ5np@%84b(i`@~`G3&{~lrI!%C!CWhD+e3aB408)TUQbhP;iIzPTDA_ zHuulGZ!H%3%|%bK0+quSm^^TC(kC z-lVr)R`+n3;o(Y)etElzfz#vh3~H*oXiVl%F?|$kbX~o}m`xF`KiPwb)9dN+Ju&7j z$j>^RQS|M9_J7QG9&hWzj(gvIL`t0>N-2`xBAvnlH0(^hlxm6MRf711aaoA2M1aJu zKRg#3jt#!Ra|Mp0`5pA}d||*|Pj>NktUZ}gBe)8!{Vu+`qoJXnYnvnHp(^%DNLshf zhwUw*&wLkRpQv&8nD?1Q5CF@aWhPCnaI5%sB9xYskWOT3<)yfarFW~gV%CF}uXdyU z<|(3BzB_-Bers}{Mwx~dI6rQuU9WEg(LDlSr8pV75t{JLTKz6EZK`)I!eVvs0?_`^ zJ~ggf%h+l>Gg+xWnk)RB-RUVB&51RZc>T`28EwdhbEQa4E^P za*r3fXzZV+zRya?rURZLcOo8>*q@0ae~J4&TDO_^lYslAiV@b(=1|+FJTXa`os-K| zBSe;DC{Nuf77Cm9OI~t~c)uRkZ+LqkGCOBEN%K%~?_v$OMJ0@180S@8J^bPmkXTz} z?d=SPX=knU9||V;`#I=00450YYi4_bzk_++GLf zSI89heJ7t|7)ta;Jo8GwL@nR7msuP8pNQU&-S&E~v!-68F=IKWRB+;TXwh1g_wyDk7XDY&lKZDHog~lLaV;(o4{s-7-RZdFjd%P}bj2E-3F1AE3hmUwY$a&9|Gc&C31 z8GA8kntE3pQFs;v7E!5=^zhO^D#YQ+HpcEE%|gi;PYSEq-G@jmxa{{#&Y_KhP%+@` z9v+`tq6&kZcVDnBZ(5E03WJQu;9crO+4|4m^+7+hm_i%j9Ge?4k1`u~SNBec#CY@( ztyXr6kbjUI9j#>Fi09OWeA+Z9kTcljrtNsDIC$jQJ6YwEQQS<0`_#Q?ehVW@@-6bL zgkR61w&ZHV>hsF+J7RuO&hx95(?WZBJWn{B^8CDFgX^W-O zev4e3P6@ddsF$-jXki$5Rsm7dnouO=1%FxZ2jPKX1Goc%IP5`0k37 zU!?%;%t7k2m1VQ?E8H{^F?)GgY}gyvu5@I&Zq%D~S1wkBu{vD>D&yeH< zjzwQ=9A&U#I6Syt4G~-w_8<(A8UB-Dkk{`{2S!`SPu&IynWY6dlo>l*I;2Yp$%Q86ho{B;LhqxcunX$Gp6GXC4R`jNZWz1z4I7bl z1@e__iTPe1v^*l(pPn00v4!{jleNn^Hi)A&cE(b_RRrOLgd!m5mv;2_qT)My$1OSO5!i2fpi}=WTORg=D~S+~A8K*>0T&E& ztfsthoZ5~;5v{BRFW++d&Jq!*O&POX&%Ws3^@9+yn_lk{g(6H?2^v66G_dmX1Or7< z&tgNSXT!<7uDPQ<(D1pAUy(&sSII#ISCu4*piMbgS;)t{0Ubo-^_eo{UBymXnDP(K zg-RMkiS&RU6DZ#qQjo30ly)8V1A^-ZBg$L?z)VrxBQN3e{f&f|S~mJ3O6&!-e`pLj zwo8Q70+n&o0A>SbmI?s`KW0}Z`(wy|-8)}4OWOWeTi6}TR_PrjAP!Gh>T|M0G)e_Y zhH$!`_c&?{b#|kcdz^K7+;h%V-G8c|CM$H`opp7(mEC>jHab>9k~0&}yn_U(^T;-- zg^Q`EKJm`}Q0-&nS406I?ID9zgTU(qEe5o^V8M^zWELecui|9)5*JF2se=-F}u-V7X`Y5o(AuA`Mk z$rb*Wcxzs8wFuo< zJ~{KGC`Op4je)&rH47D*WvzFrJI`<~{e0eF9qvSeIU0Kj=k-@(9N z{vK4SJ6L6mFeKnTa#Uiz^j5At8(4FIBL>x|3(*NJ30@$WA_^G7=do8}ue;*olNJgP zoeFjz-HJ9^DS5Ogtwj{94r*U$A`a=V-rK{rt?6^1#%IxzKdg$g56@9=3i*qrUr*Ny 
ziE;L=aijY`Q*Z6Xb(0e)-K1^uUauK0)4I2L!?Be6cm>QpugsW$z(9Fy4*!m0p2`pj zOtQHOsa?MYBvvn|_{5joN`Zj(qw4b@^Cm{dvsI(>&V@^K7~skHkwUPcJ)gr4GfIzO zN%2%OFJgW)S2k*40aIzV#$<(fjaL-^KumHasW6I{Y~m@{k}rc?bUV0qAfco9GcSct zXxlE+-IuORGgLhvT^knG3J_-J^1vmkalGlEHI8)C- z6~(QO2j*F#%WrBZ#M*M!j$&9s(++wuD?&W5vOg=w0Xv}e{STua&zyD!iK!r-;LM(fH2H`@9ZRXIMbMIoZQ5)X@)-5V~$ zAJyEaE%cGiw8V81hJ!Y@SO7x{wU1iN4n@i$jrx(c5%qp1Qn1d0M1TIUjF13 zg*ckFuZ5p=1m9(3#f0QU@I&e?oSzbb!IA*>i0Kgpp@fdVDmo(OF|4)-DwAe)qHo9s z#_(pn(OB)^)eAb(a8&ZW%7#+?hny2GLKhQGCc{7zf6^s{Qk%DxZ(dQX)kJ1+n|uul zaK0%)rq+o(IXYRaZ)R2Z--;Tu<`nEUH4@=?j2ox-qt-@1B}yh(B{f>U7hBSHKw%NC z!w%{ttY9kJ8pzTsE2~V+5pa`-z_=M=wS{RP&Rq7+aFr@48Jv6q;5GCZmq?0qa4K>{ z$F5RzvRqrNvi@x9U~V_>)H19w!JV&?0@ltM=NNdGFMbbWeQ>I4`p{X7@~(T)%}-m} zoMecc^oegRs9JZEH~HH3kWpb^R_qcS{_#aez8}0#7SRmG<}#^P*0=9)ctGh^xVsOZ|pWM)rYt&G1DS>>aLG z%%&H~>O6;KQj7R4x`L?avUv6lQ~-5w?sgX`kLk^>Tky17tV0L?AEY$u-*Xp_K`Fe)#qUqTL0iGPKVy)ip@n@9{tRY&b zC!)m?llCVQx-3W6&SFp2@*y)r*Bx#@P*kv%F7{1dm(kLaA2Wzo;OWub)se(2vTI;zKxLchH!>EtpNNhZ zLd7;UjnM?f>PZwM=oRej28IMD7i|;S$%q$GZ=v@$sJ^ay0RbBYcaH)VcM`y zdou`Lum;b;A_4;!_^fkH_JFp6-ygqGQJ=c@N*;870fOuF;1#k}PS9rz)MF7gk+zL9 zL#9^eNcQ+I!XdSUQME3%5DRzdM!!a5WLr^`9Ievn)Q4Vg>&Y(>9`_et?ur+KDkpXZ zDa_OAO$6Sgkxw)?GqZm;tHLdq>Yq>R^N3!S;xO@F{a*7p#v|a~exeX!gSCJPFiR+;UGP$unSGV#8 zi!kn#MjLkW>y0o(=q)&v>?$u_UhTUWFnr?V>6=0}It4|S-P;%FE;_KupH^}Q8@z;> z0d>Q6QcH+DFSzWLj#Dz??eW|8TbH?K&mj%LAfX@T`&)ehpv+)N@2wq!1rHA8^u(69 zGjFr7-WiEVl6GrLNAC*KwEF7o=&%zLw8{#38`AodWfI)Lr+FA z#VI}-RDGD$eU{`tK6`L$_p2-)FvuL|J;V;VM@c|!p?cRIh@($RHRnV)?sjO6>5nGA zNu;~);b;aEH2qjsPUhOO?uBgg<#dN)g9Wg0Laf+^u&Sm_WVM>=DGT0zg17=aP4V|G z;I16FdayW6a5&I0Z^rKJ=S`PgN83oQ)PW(0IGw|zt7#HvAoOcxm4eHXzA5;W!jr!b=R~*>}oMs5@`ZS#yOhi%0g+BM&b3UP3rc|y3 z&1@ql;~dJbU(R1Z7Jl)PwY@d6n^6`^70^H>j)>z=L0vu$oq(GPxc(VUv{aEY5`)sIG^a(93X{V36b}$H}jyea6YrLoT=F@WTsj{w8L&?UObNV@%y~t-EE#)(l`V{aLhcLj4 zRBRmuj~=z|xI=B;Ax`3D>lF#>ZkAqcqQ%SlgO}yP?)F~Z(YdyfEZ&YyKs>Z#2Ox}l zj~SdlpO@#3@zM&AkSR zdQOp2pc3zfaaK=31kq^g>DnV1l=;<=L%tUJo~aW_X&4lO+>BX0RE^^pMf&jiMhGs* 
zES697lQg|cJEDUd+-pLyTb5LH|b!2p}8N(N%F`VU$^T`{gK6~UDi*5CKn zE#B(anAF*=wBeoF_WMK(rZAjoNGW-oJ&HMXlt%H{@Ep=@qr4Exx8rWGH8bxFAESs?+Y(3p6Jx;^osfpzT}Rr7EQ!6yoli1)iC z<@b=2S%b-?Nm?bsAFjY4y@_QO+$1rt=0;_yj_XV1x~qP8{#bz2R(Hu^C=Me=;u=CF z3*vRc)1HA@?eR)~iR;u(K){j0pqthwTXL_<{JBK|pp;1Bn|tGGw1k!ZY2<9V~pfsBc{#|`vgU#N=d|0kaBED4CI*9tm5S8v&}l76am76Z|c{h zTlFaOk|%q^OX0vy-@m!W&s*VrOVpx7&g&;G7zm-I@K|UPbZx#ct>P*9^;OpIdPr}g zk9z_=M5lYzGT=Pif~G+eoC!Paz2{?|jWMDxiS|Hm#QcbjO`l~8%MA%!ydJ&W18qOI z9V6yr{K#H8rtmU|goW&qZg|(SoJB0FCD%(a{l$CV9lR4K>@Q3ijL;PhQ&-VT(JSnK#%+o0 z6%k?Z`S5~Q@i2G!;55%Aj>$o=Yf)161}C>!?xxCO5bE9L41&@3S;IR1LD(DnOFrSH#u-IIX`D!!qxyluBxo{o#VojUVIuGl56tcg_*x z3{~l$-fU%=UE)6znHYbk1>u9SHADaKU~h!*2Kez+V1lW2{tRU4VT3a&&(|u^WP1;5 zZuW2Ywkf&)bV>Ithk|=bD*v}R(|=*;r&loLwLn178zx$~Pq7k8O#^#3Bz=Z)V{ zE@zmgSO50Yo03B(&RH=&|Np!FetHid))#M{#wV7mM_igNE1bpT9V4YWLcoEot0~;7> zowM5HDz`cNP^oC>-I$eFq^%L2#9Fbd=X*Py2%*5S#FM|HrxNoSj?UF79s?6jf zzA0?Qhyh$$y>@$i3gz*xYeOfwOcpVyYGKu>d!ySnrOALkCYvefr71=#xamdrht%KQ3w!Tg(j=PupBxHr`!bzDSAg8#PLV zdnYFwB{V+topTd0Sy>6Lx7;knzWloyG{!g^(DoqPZS3FYada2;qlHMfoqDnEc}Mm{ zghce%j7ha2SH1i)5otWDUP8BJ?!D#c)0<-T3L3g$Q8swwb{b*7RW?TCtL`h4^?eU=n*Oxr#Lv-f*=SdUgK16J2cHYn3zl2OYwyzXPvE;-STAWj?B5u>Tt$wtA zA9hyAPRZf3pYgsq8Te(UOVOiIsxLX*=DQifLe2on@CUnj)L2zI=u_AQ+zeCLdO==N zUbN}1G25)yN7O!jmpt)>@6{bNKxxT*B}#f}neW(*R2p5G|8t47!vF#6^_NS z={-ApIm8S5Zmb%Y1Kf|7;<}}xhoA|pJo6g`!xjU=H1hLw7MGzk>ek+6W<>tm3rqvZ z+pY5Zs^!KF*$;KjDc)jUq@TuW^hU*$MC{teTDr>ylr>!cBN>nKkVPfCug?A56y{R%~3fvxkF_5C4r%sMbxtc+RB?fVR>0b zt-2c4lXvfAGWenr+baahS5MWxBlWf1qVXC%Hr3LagVGm*=(ccym|Wgoit511zbrRc?u#~jt%xDJrl zd;lFUdA2zB*EC1G7kX;mvE&tXScSCu>^?ge1OyWh+81WFRBUx(Ef^T{hUP0~$;K$* zvwV|!KrX2RG`by$v)Fe9p;+7%;+fB#Vj&Q^GX4s@gWxdsK82T|P3ZA8`#vvgZw3l< zy+a0KmD0));@gXFrtY3C1%yGD$S8v!q0IbP>%z4n)Z4bQt8Iphu*eY2_g5+ z4Ajb0A+zME?@yGQktBiVYZwyHgodS>3+{iUGz1##Bes2#i9)Rchy8Pc;>g|M45{-S z4}B`{W>68UZi~F3S3%T6wPRKb|KX9b!;*MU1d-CRSSfhqq&d*L-A+h5i3+S)5nGlq z)BKcOW;9Fx%Pa)__0TP6O>IsytrKJL=fLxQ8CwBDHeG+y{lsbu+2M_f_C%dY`iytg 
zVOtnA!r_zgXelH{aPEXG_mF2Lr7sPIN)$Xgpgc7%t(U)6wj1_Q}5_^nwPo2rbaXPCT=8dh0$v9KEoqpT64_c96nwsS>d*it>g0Um5HU#+?{vxB3AdZ9=nET z#p1U=(|3`CI_oL6`%5n6>D#M<%q0I2VEtoXfM#GMhps;NnlYpp`XGP*I@b;aXdO)! zV5sr|NOPQSWXA8jqglo#QQU4m^%9`LtdDKz{|LLBLqTZU@wvm|ia>lR`v7vZ0N0UC z%rs02>|@bA{`Fe`?(tR0Q;+vppr%M8lQ3L5)nU#RIODI52bb15b<_h%2#A>+PZ!GJ zIgEx=+S_U+1W$*J&P|Mj0u81H69m^YyLT#w{3Z90b(rn9$>GO?8Y!@=Jt)ChRUg$GJGfQC(1hcsk{ei~dP zECPhHV-L2`)z#%fV|Qz>2(SHj?0REOx|j11?4<;q&HXxpcl?9U|0Re^Qod*0KW(m4 zt-0gAR%cEGuuNwceI265AJYWXB@SD{ekEw;{VFQ9m|Naa?{j#g#3K_rYshK;3T>1y z{I%ekUlhg4t@%0rtnae&r$u7A2_|i2l-WoHao{h+GR9TIZr4QQdW#-vEak&Xg7`bmXTJAB|}h3N6Ml_TsB*RR)QWI}-t5X&>h zM74$^^p_&aKDXnfqz5{G>$b!pX}607I<0e7)dDNdyE@Ww8M_2y15O40~@Z&uD{3g=XN z>{azZi$C%8%Kdrg_o{THBjD1W&i-^67#_%Pn?q$%Lmy=x=IG^kd=O{=tjieG+m?Nu zfX5d59QSwN_a`j4s*y2Ti0#9^8UvkHIVDR8hO>CrboY}xD}LuS%f&EB`+w6cjmYj37RMQ=L%1btg>ikF)DH@V%gJ&Hzc z&IcP}^cOiiWp|Z`>5&E#7@V2z#za0xSh3zI%C$xH#~>`XdMU-zE0_j~kdNoI=Wa`O zx)L$wG9rgp7({}z>GQ|^&)t(l&)gsHFmmiVurPQ(%#CPQI}cnky~blr^mpKf{-Obq z*~3PGoFUPy5P0Lrdu+)gAj@2nU59bL2D$OAhupBkU;355RA9%!bVOOPgVielcIZru@6tA?ZhTB5QIJwBo=E*+<4Up#j`rI@K#JWO@9Kv z%LbH>oft(lc4A8X_)PI$s52Z!j@kI`N{9vWX;HqYmZmO0La#`pcP2>g1|KKmu;tYTRlj^QomPAtrOZ4#W(e@V`i#IPp|3YF#$ z*yITB{Wz8$Jhw4o2v1KN4CPtzBHUOKSy)b!V$fOmyjQf??%!L@bH<4;%Kse$Ed<mT5?tizoU4s(-QU&>7udw`hi+AC#LGXzrtQyDum*<=nb`{-r>LG?+C91+I`NV9tC+Xv@rf(y5FX-1rP zG9$QcBp@}h;mb1FzxuY&)I(*=(v1DJ^UO*)F_4yAd$v*b@PPgq;%g;X@*&&j%^}if zD7hxjH~yD01v6kh%5$@%V%-6T3(fUrvnq*+9{F8l?60rXyoxvel>fYq1>*1^)nem^ z6}CSIpZ@-LQABMt8uP5iKe7Vs*D_pRq>9Qv;5zU)9ZL>TaXf@w6%e(Nz zY&M6btx`g_;pard(2M))zoT9D-xnJ6HvaxK?ewF;S7Z!3aS0kW9PIC8L%W5#=k_kN zuwhd9WL{CHaX~MX>b?+^*@0F&5pb|f@9|nMC@ELk7X}R%n`rhG8HGpPPj#NpES?1G z3B5DIIZgEK0JY=#uMXn<9`|B>wzIKlzPD|as+1eI-<=|T-{TA=BlZ~zQ^}=ur0GQ> zl-H~DOf>~pCf)x8Z^$rIX5|lFWj5ss+uU?()4i?TUu}uf@eeK# zXp=sXfZj82*XDaAtG4Hu@I3-4&pRnq4HOwMLGfvqsnRbPg&i5EaYX(4Onco_hkMBl zt@@Hx3SP(WcBdW&(7%UfJL)i7$tJjdRCxxyER`p-*5P#x6((uZDj_jDwA;Hd|ZR3_btXP 
zh}UdfkU=p^Er%&l24X^uK)L5`P^07ztye^#4MT!nXVQhjCv_*mqWCsP@u;dlsQ0C2^*lo9_J@E{!24C6Fm zk!tKrct{I9Hd1v?oDgIARs{Qa&JL^x`j#wwBk0t21|NHEi!nVYENmtoG2#VRW@U63AxNaUhl(HY%J#LKj$lJhpBO#uwFcE5gft4l@5zqnVf({f?x1L2q{x7l_=Fsr79 z2R?aH)8cW#L_3VOa|&D}r4#5E`j z@Gnizj8g3SVobHQ` zWn*nV57E^0^R%0ixbtdQ2g`hK2kIK2dorEM!ndJNYFI2(BQ10Jz+t4a?j)=ZwevQV zU6ef*NSX$cB8n~tq*-*MZwbL#l0@RW|NBR|HaTo$#8$^J`|YpMR@UtRi7;r6KR zU@M+35w1}`@s`Z_+R5`HDVK6C-7ob?bEocdt6dm8N7!Sml$&{JQNu>5Hh1shwH}3B zYsrayGKahgR=>L|cRZ~qvgr14=iYG$lr}{3VH{X_Q+6>|U`-G$ahcwzovgic3U}Ec z-|?>(#WBIyieiF-W?M~*ya{VK)7|p5#DqV2FYzLp;WxKqD z3qPt)Zt$NyZL5S4MJ!7{JWOn_;ctX9G9Rw}lJb%e6-90uN{Lc{c9lCbkY>}MPSOl=oz4!jg z#~}t435KPOouK{P65-Y+k#d^kS3vqG**EN6wbFJ^aEI@lddvyMe{n`wzf`P*E`f3_N1RlkbV0H9lsw&od zlhK+~{njB)!B0OfxIsiyVtVUtiicLHe45GhSZ^>Xr?wy*-%osx zz3$pJ__}_R&7~oK@DqyG3*XP`Xy?-$?)t+{iZrzbroVoO&qC4Y!br8IM8xgbX>i$I zEB4-$+zJZW8kxONADeABP{iD2g_F%89?h$6z1UA8RS64l6@c#cdoUKuL8M^ePiMgzN#Ljctv1X8tds$%EC-at=Auj83&KmV?Q~v$^ z9g|QltF)q2aP1iL?Bg7U-(#-n_hvD}mQ1R_N`$eQuTDP9Jd+g&yP539)0fUscPx^c zqxm#PRphcYc*M!CN>KQ`_)L|>gkZ&a8mcv82)>u8EU0@&5_)SB+?}c{C>`GKJuP^#+AQ` zfWR>T(=u3&-*cX%#kj0@E9U8UGs>&Pep%|v7uIQu%48t}@D+UD^Nd~<`iYYwFHYjG zQHWXsLR}l&$M(84Xtw*_uOD-&7=Q~`yKBcDm=4`s1Slku`k$t+-`if7rK_-A#?qXoBJ6zhg1FR&s!;!s~k(y@fl41u&YChe%*a#qnCSxY&bpzmv%qEB*WWDSf0~oYtI}+#O@u@eo z8s3#QehFakho*ZHBoFD+MI4}KpZx8%h&vN93hx0*JNv;lPk(BH_a%@in5Ua|4(Bs* zkBQ6W1diC#6(syz1&hq}!G6EtC@(9iUwBVTl1FSGqQuEFY6R;$8mzoyF^ z4~nBkXFtxhE@(HFAz?|V5PJ_xe&b1DF?1m$v4ds+YqVDcLnz#S+Kf0QyNtd8{<2T@ z+*OH%x%(^i)F{|QvrV`ndEzbaqV}U*8njmgxLlVtDr_-&iArp1oD%KV=Q?t=gsrM; zo-^gl+(R!~BF7P~n(xg{?;cX|I2(2S$vujD0T3&BhSZN>O6ePg$85!G=yrCeD<%R+ zTSYoTz7-9+7CJxJdFsmLoC;L~u4{8SA9l`_?71eADD4l~bh$lUBK{OV?6kcmUBq$5 zn2Q)?QD($xtk;aGR7n*up;alfDPO9g8q>{ZUueQPj8@Zix1{TwPKq)F7^jzb29V7@ zj$Qf|dtcS2!rl4 zI`C1QuYzcW-^nGisgaK>+`iWFJ&DjuV{90v9OwL7AoMgzE$?0Q2;74{Q5KbFU8%Xx zXE{Rv)Db6yr#DcPx&5K;02-UF>Az`+Ylo(%rCB(fVOV(QQ#9?}FtDCAhurwU<6!N^ zjkh~|vBVi-Hg&tZk&p9S^MKzl(mvC~rDip3KhG@r^~I}?>?hqhq*2CziU$M1ftGCw 
zI|vS3A~&2^IO|-#K@fCLNeS9vCT1_?s#ESd$G@#HC;M=TSblChrRzkHu2O02+Q~~f z&n5_ZERz$t{BGV0w+1YH^GPkHdJe=*cD_WIFSYWu7!~rU2XN0>(3r=OA$u1|Hqm!Uhp*fZV%p?*gvde9k}Fbe+=I~#Dg>^$eCveVxG9NV(f4Gmj|DH?LHgDnZbKsdHu)q8O0C$m`mJB z^yDco>+^i<2Zn5RE2*xWwf!Z4HKh4eO!zO`A?=0u=x*u^@!N!7!a1{9?D0M0;tk(_ zSB*qj1&C(1OkNGf51j0Yz;rz4YVh!*JC;+@lgmeLU62kub-av84hza*A^-9i(Bt&= zBkYM#KL3O4oRkEM(??tY(&vO2A!goM;iTXH7c&EhVDz6F-wDK-yF^j(J@>WP4bv72 zIAmXDw|=HW9LY!Qk;Aea%ShB3`cq5dAMP!){6iPss`%{Z-`6CxK)B31PK){s&NE3s zKe=m`ahZ+3jEd&AX(gfdm#BY7$+Y$Px(IZ0)do_5pnErmg&^=_w#QO`AybS!gwqn| zSv4r6qWFmpeosepS+6wJ0qYWU@xBM93g%JgF58PW)NHFr`x`=eoYCh|7C)NFq^!KlCg(k5{Jsl-x6MsT@AT~`~&>GKuM*=s}d3PkAkkX--TS4 z;4pRrebrdOcVui2)UTYoEJ6dFI(u#_`Mk&OeTj$Lu{qcM$nOV4YW_e2G(vmV%ka9Z z9gkQaf`cl&+hqX#eOIC5>&!H`6cLz~bqWP-r39pXEQINS2x|&zD0^$KOPwb{uR&|X z9a)gImzh%AZfhE?*5iQpZ;sD|5yEwhI)dKV0Sq~u!QHpEH2v)%2 z(rye$+pT3hO>M2DmHDhO;2|VlT!kk|3!T+_()|j<4o*Ta9qH_oSpY?alWozO!!8F- zp||utFg8G=k>by@XB8Z~NUWfzYjh&Kvi`b|GyV8_bd1w_dVl|#8wiE$6yX)=gCOyE=34fu*&{?c# zC>q73J1fkT!>d{CtRDEpAW$em-ZnM|K%_c{QGbGhb;*|klM#)4IJe*Bu}dmrC~z`{ zq_zhN^k_qHZs~W4WR|_IlVFchlkp?PrORkhh*&Rp67^2XqfWE}8JtP)Ndu*lpAXQN zgiO)h9}!$p8piB#?E1*~OCpPdfY>5AW&{v=+TI_$5?A}dD_trIWq(U>wm2$FKm5F$ zL{#FmQ&i?8m!+l*C?w`>A)nZ&{^t%_)$98t%zBH&!rr`d08%yomNioZlXDCvP%#++4a0g$;JX5s;p`yDIC+mKWUp8=~ zBQoD@DHbQ2J)Ydd%9^QzV4R6hsUtJaq|=Veop0Fr@eukE>bnkP6m~sRf3me=hvAD{ zT_U`5PiE^&EyiMs?|#8cq~h&0&HNI?9L~b^s++X&=6PNgl>!mZ)OBNe z35@AW7a1yN;s*RiXe?6cTHNIV3N3U!=t*l8%P^=p+T>$IGUo1%mA+C~|5Uc*>*M!n zo$Q~B*zWoc_fLAAojWn&SQ1v_Il|)cN|Nh*OKl~Nw^Ti}a~(_Ly>b(1WU1;kkZn@p zPJh0z=ZRIFf;TK!vAf!ob$3{zhC&TCJ?lgNEAon=ZM-|zzN->R+TCDqqaipx#{a+> zojXo*3AU?Eg+FgU-NfF(W}>{7k<<9G<8|iv<32A&jCoB!N%5jqaY8(gKfYq85Y?c; zc!lpG)f%awq*Qssn$TGLFPsU5$bzw}^>)I?^(R6bKTi1E_7hn-5YjSQXcAr9oPTQmVr{sjJh>qOgZEd6ci*e8OL|+O zf8;2d1sr~Y{Q2D4EV-abA2g0w*QiqybXA^X4{+mYtC$v}iz{#%$X8HsXX5-J-06uP zRJC6e~{8VM4sARvlO(-P5Ui~6gK#c>07)n4gI{c+>t$l&buDh|h(8>4Uo2hTT= zVpm|yhnQbdqath|nqI~4e|TDkL+gt57LcfS&b-j4S8-b5m)yy-4;zHt2n1{u(*Mhl 
z^9U&U@dwZWtZB5yXL@i9))2}f{6{KJn>7jtz+n_rMxcOUZ_d->sOo?Ewr)JWnrVZ1 z+Ftu}lb|@QQB-e}wdeh%Jv7c&pK$<3>Y0Vn@1fbbc6lpNeD`VE)l$ z9K5w?j)w4(6k!U5IBEI-fehSkTR6ijCFv zdNM)*`?Lo>hpzcDkfXil0K(H0IOD9_M+VD}-xi_lqAZn*&beyCd$o*-p1@sueU>)i zd0SMT<8SjlhHYp%h(CjD(ieETHh>dox?OEB2*x83sP8=IJ$t?Wg484d z%z!P3BNh7#4X(n;^n)Jms=i@wj^$+V-$vxFR#)o6aht_{Is~p{sh5j_60AM*E%vGU zlhLq_*!$S{p6k%bnxl;{j?XKY4m&}WCri%(y;C4vA!)~c(J=!%{oO$bA$Y8G&BUDr!Ch#A)o7Kcum5^HZa{JeVk zqU+#E!H-PJXP=?lNK28b7y6w;FI2kAPb_s;YH+ySJ^M?|LokA%WR!Pzns!NW@jfBH}@)X9k(Ns9k`T0j}v2? zW*qh|b$3`)O_reS%mrz{YFc4HF;itq=R3({x(n_k0}hQV8V3Bqe-ktBgW0*(F;+Ja zq*Vk2+!i6aV~OcDH#W{PNWfTy)#zg`9(>9Qh1bk}A~HJK1VYq2U<8I+ zxWH-lQzp|WyKR9uV+5OnWv1opSF`*9++B7i9;>;on}nPrj`{4m-)b^DZ!oJA(C4eK zvm5mnG?z8oT0D))c zP2)yXI<2m*s#Ol7>&JJ<6Vo)v-ujh=wmbTQpy>XYJD=1Y>4k)Z?rbq`4qEtz_bHBM zhl|JuV6$RV#&bB>{iLI#Gv0wcyZDkwB1a;UOQ=fPlza;t9g~nB4gL^`K@+gYFngg= zs8h^#^QPJ_l8X5^^o{L3&`Hm_NOif=LGOx&CX7Ns8f^q3ijp+h3tk;2Yxp&x&!hC< zGbXtQyVHD2N=}ci8}wWYUINfY%K0cu*#T?WmuDkH-MUYkg<5@tVy2c~?-enPSi**4`uQ`loBTk!EsWWRnCF8y)AVX1U_Z~|u;wo1u%9NCPl zBu{r@qpmW;<*64tC5&lLuXd-qYUGbIiY7`|mA=JTvS{P!<54Q+FB|!S`$feBh>%wRkLNPs?K19 zrAed}U@s@iVNWeFRx73wmgz~AHajzHyHQM?lN~4B+M4hkviP)KQj1T{A!uG-dEr(a z(yYB)S%*jHvx(56Y?NchEixyznCf1LWE6CZqHYt-*9y2y*uPBB>Hd38x0LzKLN*t3 zqX)=>ie`&7h@a1q+a{vVliPoO(8#!=A-^s^GCEz95+XWK(!Okl%Qh=NoA{GssAP7i zi>xJPec1nTTOu7?n`1FF%Tnx*_jDznu-*}-6`Max;t^E|2Frneu z6S}{5MsA-@$@BaHvh)9Je}0-IxTn~8g-idjM#vHVdo#uPX8V0k)E?!h;lae| zIB~NTLQfmMM+e{T6JRBLzD0z((_`EiM@t96X)&Jx}|Qc>1{Q;oG30Q{xdP?AV$%a*FfWW#@r!TMggxJ~S*e zgqL8PX!tr367Q@QzPBeyMjP|(uDmc~({nxPn}ixeHzAEk_Q@b>>&21kYKXL3fXaXV z`SWLI-E&6&88$KjDcWusDp|gB=jd~^8qSd4F;DOzW2-N-oc&P4Ce4nTHhxB|kcX0q z&g^ZKK+X?T*yR@*4HSh5=W!oh;)hJlEfvV5u*}R<%+_#K5tluSdQut_zn_MBF7e_4 zaGrgmA72=KixCKljEsy^FjYYf-R&8UpMTWpDV5s|ei!e<7lEDxy?-x_S165zdX8%} zO!3}bE@>mJjk~xngbsg8Gl`K<{?gHPmI`$#{|da)3r?Mae^3hqS7FXxq<&oHc{HDQ zVLnHF$e53I1WqYzh%eY7OBYc)0#nkJhR0Uxf!EQH?y>OuMa7`+gHgyd8L3(-gn$z> z@|FJ46}9xp71AwjZN(7X_Z1lr>4Pag#f8_6S|*&A(r{z9_^{B>(3<|#3rBC70dM+* 
zQhEE;(VHs4o7SA6#1}^GU7U1yQzhXb=VNcG05{CJFI;!*Js9CV;@=6cj@9 zk;J1Pt&s-LBg{P5VX2xqcu{5(henFS{!@Gp=9<<6fGSU$w?Wu4LQHplD;Lub>tjbP zp%_MhTaRqfjW4vm38T#s=TJ|k!GT4^d=j8bd$g!3rw9|0yG=Wl4_6+|jRuD>0sDXR zRTIXi7679bznh>rd=O3dpLuWtOxg_DcVqeolm3#1pmm<&>RgiFf!_ZE;U-mj%N4&Q znsL|CQ-BD|ojY0Pb)Gp7687pd@^>2r$!5vcfPgE$8f6=|%kUd@tTFGwstMHHf*h@I z9*3gzy=`cj4|+{O{d0S8ev5&M8mVSHX)!rD--50wKeZXMQf%JlFc~)NRkQFh8mZ#> zvC*gi99@0Y?;qhe!(FzCQfYBfrrlE7_Ou(0>6CC98`lFYV#3b^{wh@ag*A z@E6G9Qp9n!DgJ#m(((gzY0B=XBj+&Gg^$|gL_|c0ZoVlTbHk=WutQ*90W<{*;8~<0Wz-&COyC5aKg)a}R(Nsbe!}=J{ys&f2&8M%`)hJt@+(<D&1My z;D3r*oLawpq0f&ZI6N%;h@m68rE&-+yLo}gH-Hd%QVjPQJ5!bI&!?S5Tvl@>WCoQ? zGe(_BKFsQ6U9H^9cOgm+FEs4yNRo)0(*QhN2#Vfancxlu;!!?$?VdDwrt)8{-0ESV z`8r+Nk+TsX#ZHbef4jak#QniPYgBRCd86<7(5pH?_n9#+erI;j=My zVhmR`+F!j4R*2^n3{LYdi1vn$eqC@9Non2SIZ1vu98@3u~p|PCZwWBV~JZ`b`;i+>-pf@f{-hGnFg~yxTMJqQ4g6+gffO zp5xKyZd;dV_%>;3jF_k^3>2JzJLIsOk>Y%j5n ztajWVHr-Vs@zz?gkga4!o!Au)p9m`-+yq^+??wX!(Pr+#>qBF?B_1z4h&wwhY7i0JRRJwMd-?O+H4(oH;2iv{XgI5y0Bh2c_;PXT-Hgz)~xl?RTP_0h} zrDs?5vy?B~obSs|HX17H9&$`0R{ev4nAL?^;#ea(Nhd&cgEy}mbw1CNgo z+hjYZ>4W9$@Ehsjd{j;qBf(ZgiwMez@?TOn+^TFTAoydw-NJC`&&)$4Af7(9bsKBV z;r3fL_Sw|$kMur@-cd+;xtW&6$a9CaSm^W48FEhjnKQIkD;X<^4P{r z{Z5+NMo)sx+XsTXVN(#Zu`-{rd}kcB_P6ZpxphF~y#-wFP~IJ~jo~h_7r3o6W`i^3 zva0v(ch)*34F*?1upIz4T3@XVOw__#rmX9G<~i9%E$6jDbfL(zq@)XEPQE9V{TJZur`%l<`yhj-0r`$))7P)L zkSiKy-U^ZW^1F1^;w%zk;_tjW3lln(g%RQ5T*RZb4!ATD)h zYid%Xh!Zr7a&X}zSW;3PHb5iYTPo5SWFO!GJ8A&5)T1YTkem>xE~D?|zWm3OMh6oO z#|smH)81PEO|Ag`Whk*y&9YtS{T`$0m&V5N`nE<>CEvpn@QIM-D$hw#LPb4}az`Y0 z{K$yz>KK3o<8bNGn#Y7M-|%@BHC_9sUOMCK_ci6@Cav3PY1kj}+O9n2G{}zFw0#>z z6JN0fzW8%K_YTSh>9N*`*}p!(4A?gxFe>gtxNV)9z<4Tk`^_bz;$i#rCU(AG&1{Q< z7Hw9)=MkW}zC1EA6~ZL017^UulIm4TK_JiOU}sCerd79mdv`7hPtiy{!XP8(Bnh;) zf)H6ZcQvwd(Vmr=*{33aOIk+eseM$T@tH2#y?}FL!Ew}kvt6nFnqmGQtY;Wl!%l6k zc1;+)te0!!QhXd88A*9(0VsS6txV+&LDD^*A=?R`rQNb`^e*q;&-{|*`_E<&IQQ*< zvmCy@nQ#TWEYm*6V_b|!?tbov4u`-?8J;$_*=bxUBf(#dq<`{$toON4Ru}RxTa*=Z zAmF4d7`&k#YSj#KG5AC%K_?TLkjvJv7OlS-Z>!tkN^A+mJiU~!sq%pP$jT6k$bx;k 
zMQK}hWU@vz(cLW%-r+MJnXLcStHfnglcrN;kDj}Gk>790*Gxp2iOO#BED})c$ae&= zyAvJR#DKG-4c?8W3B8`<<4+wR+r|)5M0&tjtzotg9LfL^rXZvcdroc9Jp5EG_~y-jcxYSlXaU!5Lk;K(-S%Si{5OlD>hnsl!xcp&jXIL-t8Hm z3+#D-Ee4!6hn^GgZauz$`%U}MPSuRSHTzW2FN>A{1mxA$-jm%vqOtgE_>jLQ^GYaA zfTt`)q$_4iY?t1-c=(7zlkw8|5s+(tE!4(DtycaonEyS)HiOA)8SHFMv*UH|*WvG|>=B zz&NMv#!{_Nk?n;QWpGJ#Dr+}(Mn@n0}nL%G5)WkMGo#5kQW5gB^^wM zZ;R%J27Cp!?TRkf(HL)od#$^Z^>DGfHImzCxWW!T*mY$9WChjXGOKihx;DN8LnLd@ zAyX;$l1_Qm!M+0$KWQ1K8z#=F=RJF<9p01yTw}a}te#%#9(cVX)3Mr{14KV#Q&Q;9 zcFSD-`>cdFFpjX(mY2#)jRrW%$KNtRu7`FJYy5++; zoCCEGis^4Ooqs3#Q1eyQ-r$n_J?aF4c$PS~^+FDa`TejLHO5NjGNht4>}Tm97>ZW6 zn+nuEqh4$riBEO+J4mxrMe`UhQ5Z{3(UqpM?CF9gvs>ln@Bk?E`q-3~Qi%-=DU{bA z^Cu2y8QtDpsWY1V@}7i)S)<}RG5yuNiv8{RjaocbB`C)bBc*Rg3JZ#st9JU~yS@uG zCkP6l_C(M`hwKobynY1B!*a!1^kofjbb}ecut?P&!#4rFBc86!v|9rSsRGKuVpjQ* z`7(S)Q{~xCVXwwK#sTa?p&o`I9pGCyAk(=ViiU<(;Zu2+2GUhPdhsY1*#el7*3X=W z_(`_zA&`B(aG3~<;6&KZr7FnCjO(m*M?rQ}%Rn=V?ja0@X;%|lkPGC8_!kLA&cHE2 zJc4R#OH1Y)VR0q^Hx~RKDe>=avhqA2$Hl`_j~M{SD@$_#gZmDrk9K_s&KHN{mZ?{( znPSadt2w#O)8#TI1&*zErc1Q;fm1H$(ALrE&uyTZci$4xK2Viv-G(GobzY~n?Z%+) zB`zz*{bX4+HACb~!`G(!zUOMuPJ>A9q%EIIg_<}9sT&=yOUaqerl|3w8-Pm%C;A^c zo+*9xL|oC#cW-a+6!vOX#&1KwCyUMA1a;v05dB}QPwX?PyqedQuGqGgrl2FuhG zq(T(KN4>*4v?}86?#vBeo~LX&T*m}oSjKH{D^Umg+EZ6crcCUZeW#ocAC zW}6l&dsZyU#=4;@r}EjpcKcm*kanI@1-}gpBU^-AN=Av6JMjI31E-k>-&Z+9=K7W( zR<$7g2k?~T1-y1E#4x!D;`}H4xRjRE`VWSET=;l%LgLMfU6ZofG7ND)$8rT}j$GAXY^^ zjc~f+Trn&w=}|^G3-imvkwC%$R5%FAN*>JbhmUtq4h9TpIsE>AblHGKw?7)%g^q`U z0&<62=>Oy=|DDTh7;>OzACgG4Bh(H_O(oiUCGr`o`#a0w8xMureN9?I*&4cr!vaW{ z6uyIawv_fQ+iR`?=bUoaFo<95?~`MU|62EzP&9$Ewv(4%w;%SmuwSd`TBb$nEN zSdHr$d(~|s5iYSONky5{ZuOZe81Ck>Lsn|%)nEwsr_xo#=mM(!w8*2~NbIoxdwSyT z%?;zS7`xfT;0!pd)>DCsL4A6Vie?Q0NLlYxu%_|ci9^!8mK9vk{tbzAs=2H0p`e2_ zzYRo;0x~w8lIUfh()(`}AV2+>A-$`?z|34&zEsxK%6H&cte(Ho?|W#d$4J3<9sHAA z;0X!+1?gF3jb#wd6c3s|3N0E0TeJZ#sN9NgZ_OX?K_w+ZZbC$n3}B$qeq%uoVT$w^ zfz2~-N?QhxRII3B_n9)zl6;Zv(5~j`lvc(~fp$4?iyUFfO$r&Tw6|8qvYbHZE&?XB 
z)=2j4S6B2ChL9Dvv^!S^A_#4Rd|?b8I^_;XV)BKm&%&Kw$t^+%WAH5UJmMEsA!Q60 zpn*u$5sBT+>I3F{0_sCo9|5j@jG{MTBMtJQ6r`jcV^cfk6dDipGf)5cQO406ai_Bq zNq91Lu&fKXFp+3NZ@Pi;Y%#vVDlu>YT-633>psw%9+Q!gL8&Dh!kCGbE*|V}RwK>| z@;hcQ1|e&%A2PF-I*k5UWa`-Nb41}Wo}7l2Px z)zL``uT-Fh?>I9Ti|a8Ul$&Z5lxG@2ZK!eIjxgyJE{@^B9SDV0BDqKoWaU)+w$qK~ zl6AVYAN)OSZYy|l-luCvp4%%mVc=dOndw0+ceDN-CZ?uHAx`}bDAOc<9c9_^u$#*Q z*2i$P#;pf2)02Mp)%*Z?%`}I7C zag`xq6Om#WE&P*oatjwh+{;OP6kLX=sgh7s=c8)eofZhp-OX(|YcTz#1z~_{f|WIg z2!KIZBAn`rHmyC?lPY^PeZgaJuBT=HuT2;=aT}p!a4CM@gQdiOr$?hEDl$?%=_+p6 z`X>?+lLtKZ*i*U&SGdc>Lau)tvgLxXjdaUEVIZB>zS=g3T{SDF*C2S4N?jAeR(1Kg z7o@kA)0O~ygle!em(fVS^^&o@0udu%2IE(F?qN|cCjOe9nrMvXgdGdDd!1?V@32+YJnRTd#322S@+z;hK77GvCt!I@`QXS0PIo{m_+R;Y)KEqfiiG z{V}_L1z1ZZK-NRePL`3~>dxS6)Zgt~jdnb@8Ov48a$zkiSX4tJ>XFTCXRGi>C`;aK_u$Tfw?2-wcXC*g!OyP)IBwu9L>^Mg~W_gcJpUnXfQqurF`lrwcUh- ze=@R`FOtlQ;=MX#Av1K!cq0OG`vwvmZHAtuk>8gm?wZRNg#;>f{kQ$5Kq5cbFe5+M z^zC?7ac(~D_TLznIy~q@;F|p|1XhQ}m2C21J zUqJ!VgP*bS&3e=^$g>NN z>`oWaJ7|V)Kp&9Jkk=7wX=~pVm@vZJ6r8Ox;`n~dVa}x8U!T?K=x0o8bGRUG)U6!8 z{v~CX^JJQ4DxbZh)^FmF{=1x3e+43beV}+`Ct8)yrsN)XUxd8q<{fpMNozQVu!)~$ z6bU4#`NL3j01aH_zP94Sz;i5+C;=B&gYEa@SRjFDWsX{+zQ+Oy(8{P*&=Y7y}yjuPoQJorS7W-_8Z?f>Mc&+=(Q|&?v`&8S8QK> zOTq+1*Ec|$R>q+&hqmxGyzNIarja5`NHVtmOrjX#e8-?AjNKT%mgbyl20d6d5v^oL zXkez%8RQXiHXX4w#`HywE^8<43*oK=h}r`5a837J5MesAV^Y;z_SQFGyOwG1AKIf) zL}_}S(UFz7P1gZFD8$*Av&^u+;43%_^2)cUAdyB6V|QFP#iP?DQra&w#vZ_TmRYcd zeX6?CdUp|k={|LvA#D&^iV663zag|cykx1>Y{%zCPQ*(>+7dGxTPKyvt{H+^<`F=@ z1Cmf?PR@0iX#ksQ7yVdSW7h&fPiq2S@f_F^gWoOP+ZnegS`?}`haEPc+FS*)Hq{2? 
zzrqEJRXa--`TcWEHEt&|p$sR>+3Cazq>q$Y606&$u!1DTnTRv{Pj@n&KNLmvJ-iw@ z+2Ppr12G>Oc2gwtz>gq7P!Zg@+z6r3kffOY0zEB#B9y15g*A@K6;qZZeoXq8J#*Lp z?~9xuqk)B8h{<_-_A03%6u-6Wv_?RrAnt`Z`#jpIDAK%)qrgee&c5>iOHs%0KtM zD#~ku5e@&^n_&yJUW0}u{iOgM!r2Xi79o$!xTvb2pv&@ooEL7U?DZ_NpWv`cjoI}& zK2;j*nKe*OyX#?fm3>!+Dy>O(AyP)JABg*c9s%DeVHlzc8<}d>v}bN*uZpPs{msXre4X6}nDG9a~Vy%l>s!OBuju zmN!0;kILL!)xji@n`PR8?Dul=;figUy(}_up0h7L*rN~K4QuA-YUfeldb?c!z^=YI zawO-;3x*Pwk>{+E>iJ82w z%)jV;x_9NxxR`haL!|rxYfl~3+JVcmynOcB&dpM06@~AKX7L`zlx(vkx=QbH+*Uv8 zUuu2d%oRwW(Jd>>=S)yM9N>39W5%Y5|M zJ4YS}YcI)1+y_VSRi`;!!L!S~=JHnkz-)D|P6>ysx>E+hhdAQkd zJ0XDtj#}~Vt|P`;z>I~=-*d|!V-8Ng(db^aHnfKi0xe|Q`50HJi)3gRebP^@gk#H@ zSUw1HPoW%b_tEln1pz`ZdU!G;3aU#~tgWqURKzU~gJicgLYg5up8C>ztf#&)`Q>07 zuGxwYtb&P$yT{RuXjaP61h`yI9W4XSmY6He(DM`bERLWC*x_lD|9cxqM7yP(fM9)b zptln8JEZ%JGmfZe95)yR|H&H=>S#Fs)*b*F6O0^YLifMYzhF^qgqejJU{RB1nvdns z(1gNreeL?8cI@Rp0gq0FeXji22>rxhuAk{!upCZ43}hTe z{e*^L2HUy8?Gwwf2V6!N-Ozsa+riGwDG+G~`TJ`rD=V|7yt#ik|LB;zEc^x2n`gJ< zowjeplWIENpvg=OJ-P||*~eEZuh>&~9qvsq4Hz&?B3IGCqpNVb1c3tm-+h@pML9^3 zj752;4tE7GJB>gsGZm+{2SMeODpcDq#(GkvN%K$~Nk4whFxRIp{5nhf;5(#BEJBuQ z?uQQ_{yVu@HPS6jO%{bFeR*{76$pLQq`T#S(Yb0WIN$Xfx-$1!?e25 zhQVwiDNPXk3;>FIJSY~%ZaBtXM-I&#H;8oC8M zK>!bMzDPi@+b`CCl=lVZO{atCHVw4yD~5bvRlv7Iw&#zYM2Hg(+W|w^63%DV1_X$7 zE7#O4y^p#}u)7_(CAb>u4){L;}7Z+ji7NXl}P)2TT5Myy`&9 z587SnB_<~7U7xf(i`pDD%dqe+-rqouHUT!UKphfmi>&4qs(r~9k1QfiE}FnL^$Q1x z5Whn)n-OsT8=pxvU)p||y?{DTt|PF`;vb|y=137R<^#~sD845Tm0kjez#J}UvU{$r z{eDTN^`-C81bl_W8rEqiBq;|{I`lQ2)&n6n4jyO}M(r5zb`|8m6DgI6Y*LF{BSQ&AA|Ub1LB{A6vIrUU3BRE)zf^=VEXvWdpCPDl;cp_mMwadrbdNeU~1{1fimav zIP^=gFNJBk1ndFN)v?K}VWPGwjS+Hk!ef)-k7h&QES#K1k#-5xK5;96|0VhVUj=6W z|LZf6Qb@x?OG|6?ZH69YWY93)c)J)di#!iG=G*yWtTFW|66SGh9l9m_Qx2nRp#6(HK?slv( zy=IVtB6bX2*O*|me=H*XWiMc-pb#{h*fz|uTv@a&y-1^uzXGW+H}oIt=;(y5x1*O8 zyBM*D*&iFANRTV(uel*F2a*J)eV;R3l^eR%VL|%Xv%{yX%&|uFl`ILf*7Z#-{j#*; zHh<2IQXUNvoe$3zy?A(CW?>#{PQx#&jd8fDuswR{Oh-KVID4~dva6av8SckYtx;;! 
zFhQQSMK2j2o_Rd9vtX2+$+D#2m-T>tYvTgZ&~0XeaMB2e<-+dAbH_$U(}$rkm~OL7 zLU}^iB-^KG5q?3GezD!HtjEr7R^#zfr2+Y|b#bG?6T}`5WbRcsu;vy~JP~(deMZ~; zSN{d7V>ew1=eKlSDr`}mcFLM{@Nu_{9K>sd?_NdntDi$9U_sb$>Jx6gR%W_hv;jpz zI(j?)FZW+FR_sQX8;(!|&FED(!G^NJa|yyP=*m8}Pd`P8#BflqTDHC*5D<~_;@ zry-OA3+j4?q6Gy{2ywE(IoLJ?MIBv+DHn53VjKDbVDyuO9RUbe{@a+1n6nV-dBiKO op%6Me3k}b2>D>Q?{D57)m+gi*3nsP)Xz - - - - - - - - - - - - - - - - - - - kuadrantctl - CI/CD with Tekton and Argo CD - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

kuadrantctl - CI/CD with Tekton and Argo CD

-

This guide demonstrates setting up a CI/CD pipeline by using Tekton to deploy Kubernetes Gateway API and Kuadrant resources generated by kuadrantctl, from an OpenAPI definition. In this example, these resources are applied directly to the cluster where Tekton is running.

-

Prerequisites

-
    -
  • Kuadrant, and all of its prerequisites, installed on a Kubernetes or OpenShift cluster.
  • -
  • Tekton Pipelines installed on your cluster.
  • -
  • kubectl configured with access to communicate with your cluster.
  • -
  • Optional: Tekton CLI tkn for easier interaction with Tekton resources.
  • -
-

Procedure

-

Step 1 - Set up your namespace

-

Create a dedicated namespace as follows:

-
kubectl create namespace petstore
-
-

Step 2 - Create a Persistent Volume Claim

-

For this example, to store associated Tekton build artifacts, create a Persistent Volume Claim (PVC) in the petstore namespace as follows:

-
kubectl apply -n petstore -f - <<EOF
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: tekton-kuadrantctl-pvc
-  namespace: petstore
-spec:
-  accessModes:
-
-    - ReadWriteOnce
-  resources:
-    requests:
-      storage: 1Gi
-EOF
-
-

Step 3 - Define the Tekton Task

-

Define the task that outlines steps to clone a repository, generate Kuadrant and Kubernetes resources by using kuadrantctl, and apply them directly to the cluster as follows:

-
kubectl apply -f - <<'EOF'
-apiVersion: tekton.dev/v1beta1
-kind: Task
-metadata:
-  name: run-kuadrantctl
-  namespace: petstore
-spec:
-  params:
-
-    - name: gitRepoUrl
-      description: URL of the git repository to clone
-    - name: gitRevision
-      description: Git revision to checkout (branch, tag, sha)
-  workspaces:
-    - name: source
-      description: Workspace to checkout the git repo
-    - name: kubeconfig
-      description: Workspace containing kubeconfig for Kubernetes cluster access
-  steps:
-    - name: clean-workspace
-      image: alpine:latest
-      script: |
-        sh -c 'rm -rf $(workspaces.source.path)/* $(workspaces.source.path)/.[!.]* $(workspaces.source.path)/..?*'
-    - name: clone
-      image: alpine/git:latest
-      script: |
-        git clone $(params.gitRepoUrl) $(workspaces.source.path)
-        cd $(workspaces.source.path)
-        git checkout $(params.gitRevision)
-    - name: download-kuadrantctl
-      image: curlimages/curl:latest
-      script: |
-        ARCH=$(uname -m)
-        case $ARCH in
-        x86_64) BIN_ARCH="amd64";;
-        arm64) BIN_ARCH="arm64";;
-        aarch64) BIN_ARCH="arm64";;
-        *) echo "Unsupported architecture: $ARCH" && exit 1 ;;
-        esac
-        cd $(workspaces.source.path)
-        curl -LO "https://github.com/Kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz"
-        tar -xzf kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz
-    - name: run-kuadrantctl
-      image: alpine:latest
-      script: |
-        cd $(workspaces.source.path)
-        mkdir -p generated-resources
-        ./kuadrantctl generate kuadrant authpolicy --oas openapi.yaml | tee generated-resources/authpolicy.yaml
-        ./kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml |  tee generated-resources/ratelimitpolicy.yaml
-        ./kuadrantctl generate gatewayapi httproute --oas openapi.yaml | tee generated-resources/httproute.yaml
-    - name: apply-resources
-      image: bitnami/kubectl
-      script: |
-        cd $(workspaces.source.path)
-        export KUADRANT_ZONE_ROOT_DOMAIN=example.com # domain name used in the HTTPRoute for the petstore sample app
-        for file in ./generated-resources/*.yaml; do
-          envsubst < "$file" | kubectl apply -n petstore -f - 
-        done
-EOF
-
-

Note: This example uses Tekton with kubectl to apply resources to a cluster. It is best to use a tool such as Argo CD to implement continuous delivery by using a GitOps approach. In this scenario, you would do the following:

-
    -
  • Use kuadrantctl to generate Kubernetes and Kuadrant resources as part a Tekton pipeline.
  • -
  • Commit these new resources to a Git repository.
  • -
  • Use ArgoCD to sync these changes from the Git repository to a Kubernetes or OpenShift cluster.
  • -
-

Step 4 - Create a Kubeconfig secret

-
-

Important: While this guide uses a kubeconfig secret for simplicity, do not use this in production environments. Instead, use a service account for enhanced security.

-
-

This example uses a kubeconfig secret and role bindings to demonstrate how to provide access for pushing generated resources to a cluster. However, for production setups, employing a service account is best.

-

To proceed, create a kubeconfig secret in the petstore namespace to provide Tekton with access to your Kubernetes cluster as follows:

-
kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=/path/to/.kube/config -n petstore
-
-

Create an associated ClusterRole and ClusterRoleBinding as follows:

-
kubectl apply -n petstore -f - <<EOF
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
-  name: kuadrant-ci-example-full-access
-rules:
-
-- apiGroups: ["*"]
-  resources: ["*"]
-  verbs: ["*"]
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
-  name: kuadrant-ci-example-full-access-binding
-subjects:
-- kind: ServiceAccount
-  name: default
-  namespace: petstore
-roleRef:
-  kind: ClusterRole
-  name: kuadrant-ci-example-full-access
-  apiGroup: rbac.authorization.k8s.io
-EOF
-
-

Step 5 - Trigger the TaskRun

-

Execute the task from the petstore namespace, referencing the kubeconfig secret for cluster access as follows:

-

This example runs this task with the Kuadrant Petstore app: https://github.com/kuadrant/api-petstore.

-
kubectl apply -n petstore -f - <<EOF
-apiVersion: tekton.dev/v1beta1
-kind: TaskRun
-metadata:
-  name: run-kuadrantctl-taskrun
-  namespace: petstore
-spec:
-  taskRef:
-    name: run-kuadrantctl
-  params:
-
-    - name: gitRepoUrl
-      value: "https://github.com/kuadrant/api-petstore.git"
-    - name: gitRevision
-      value: "main"
-  workspaces:
-    - name: source
-      persistentVolumeClaim:
-        claimName: tekton-kuadrantctl-pvc
-    - name: kubeconfig
-      secret:
-        secretName: kubeconfig-secret
-EOF
-
-

If you have tkn installed, you can easily view the progress of the pipe run as follows:

-
tkn taskrun list -n petstore
-NAME                      STARTED          DURATION   STATUS
-run-kuadrantctl-taskrun   12 seconds ago   ---        Running(Pending)
-
-
tkn taskrun logs -n petstore -f
-
-
-[clone] Cloning into '/workspace/source'...
-[clone] Already on 'main'
-[clone] Your branch is up to date with 'origin/main'.
-
-[download-kuadrantctl]   % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
-[download-kuadrantctl]                                  Dload  Upload   Total   Spent    Left  Speed
-  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0
-100 21.4M  100 21.4M    0     0  6601k      0  0:00:03  0:00:03 --:--:-- 8756k
-
-[run-kuadrantctl] {"kind":"AuthPolicy","apiVersion":"kuadrant.io/v1beta2","metadata":{"name":"petstore","namespace":"petstore","creationTimestamp":null,"labels":{"deployment":"petstore","owner":"jbloggs"}},"spec":{"targetRef":{"group":"gateway.networking.k8s.io","kind":"HTTPRoute","name":"petstore","namespace":"petstore"},"routeSelectors":[{"matches":[{"path":{"type":"Exact","value":"/api/v3/store/admin"},"method":"GET"}]}],"rules":{"authentication":{"storeAdmin_api_key":{"credentials":{"customHeader":{"name":"api_key"}},"apiKey":{"selector":{"matchLabels":{"kuadrant.io/apikeys-by":"api_key"}}},"routeSelectors":[{"matches":[{"path":{"type":"Exact","value":"/api/v3/store/admin"},"method":"GET"}]}]}}}},"status":{}}
-[run-kuadrantctl] {"kind":"RateLimitPolicy","apiVersion":"kuadrant.io/v1beta2","metadata":{"name":"petstore","namespace":"petstore","creationTimestamp":null,"labels":{"deployment":"petstore","owner":"jbloggs"}},"spec":{"targetRef":{"group":"gateway.networking.k8s.io","kind":"HTTPRoute","name":"petstore","namespace":"petstore"},"limits":{"getInventory":{"routeSelectors":[{"matches":[{"path":{"type":"Exact","value":"/api/v3/store/inventory"},"method":"GET"}]}],"rates":[{"limit":10,"duration":10,"unit":"second"}]},"loginUser":{"routeSelectors":[{"matches":[{"path":{"type":"Exact","value":"/api/v3/user/login"},"method":"GET"}]}],"rates":[{"limit":2,"duration":10,"unit":"second"}]}}},"status":{}}
-[run-kuadrantctl] {"kind":"HTTPRoute","apiVersion":"gateway.networking.k8s.io/v1beta1","metadata":{"name":"petstore","namespace":"petstore","creationTimestamp":null,"labels":{"deployment":"petstore","owner":"jbloggs"}},"spec":{"parentRefs":[{"kind":"Gateway","namespace":"kuadrant-multi-cluster-gateways","name":"prod-web"}],"hostnames":["petstore.${KUADRANT_ZONE_ROOT_DOMAIN}"],"rules":[{"matches":[{"path":{"type":"Exact","value":"/api/v3/user/login"},"method":"GET"}],"backendRefs":[{"name":"petstore","namespace":"petstore","port":8080}]},{"matches":[{"path":{"type":"Exact","value":"/api/v3/store/admin"},"method":"GET"}],"backendRefs":[{"name":"petstore","namespace":"petstore","port":8080}]},{"matches":[{"path":{"type":"Exact","value":"/api/v3/store/inventory"},"method":"GET"}],"backendRefs":[{"name":"petstore","namespace":"petstore","port":8080}]}]},"status":{"parents":null}}
-
-[apply-resources] authpolicy.kuadrant.io/petstore created
-[apply-resources] httproute.gateway.networking.k8s.io/petstore created
-[apply-resources] ratelimitpolicy.kuadrant.io/petstore created
-
-

Step 6 - Cleanup

-

Clean up your resources as follows:

-
    -
  1. Remove the petstore namespace:
  2. -
  3. kubectl delete ns petstore
  4. -
  5. Remove the ClusterRole and ClusterRoleBinding:
  6. -
  7. kubectl delete clusterrole kuadrant-ci-example-full-access
  8. -
  9. kubectl delete clusterrolebinding kuadrant-ci-example-full-access-binding
  10. -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/openapi-apicurio/index.html b/dev/kuadrantctl/doc/openapi-apicurio/index.html deleted file mode 100644 index e7457813..00000000 --- a/dev/kuadrantctl/doc/openapi-apicurio/index.html +++ /dev/null @@ -1,1483 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Using Apicurio Studio with Kuadrant OAS extensions - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Using Apicurio Studio with Kuadrant OAS extensions

-

You can use OpenAPI extensions to define extra functionality beyond what is covered by the standard OpenAPI specification. Extensions typically start with the x- prefix, for example, x-codegen. Kuadrant OpenAPI extensions start with the x-kuadrant prefix, and allow you to configure Kuadrant policy information alongside your API.

-

Apicurio Studio is a UI tool for visualizing and editing OpenAPI designs and definitions, which can visualize security details and custom extensions specified in your OpenAPI definition.

-

Prerequisites

- -

Procedure

-

Step 1 - Access your OpenAPI definition in Apicurio Studio

-

Open or import your OpenAPI definition in Apicurio Studio. On the Design tab, select the VENDOR-EXTENSiONS section to add an extension. Alternatively, you can use the Source tab to edit the API definition directly.

-

Step 2 - Add Kuadrant extensions to your API definition

-

The following configuration and extension points are supported by Apicurio Studio and the kuadrantctl CLI:

-

Generate an HTTP route

-

To generate an HTTPRoute for the API, add the following x-kuadrant block to your API definition in Apicurio Studio, replacing values to match your API details and the location of your Gateway:

-
x-kuadrant:
-    route:
-        name: petstore
-        namespace: petstore
-        hostnames:
-
-            - 'petstore.example.com'
-        parentRefs:
-            -   name: prod-web
-                namespace: kuadrant-multi-cluster-gateways
-                kind: Gateway
-
-

For more details, see Generate Gateway API HTTPRoute object from OpenAPI 3.

-

Generate an AuthPolicy

-

To generate an AuthPolicy, add a securityScheme to the components block in your API definition. The following securityScheme requires that an API key header is set:

-
    securitySchemes:
-        api_key:
-            type: apiKey
-            name: api_key
-            in: header
-
-

Although securityScheme is not an OpenAPI extension, it is used by kuadrantctl like the other extensions mentioned in this document.

-

When added, Apicurio Studio will display the following update in the SECURITY SCHEMES section:

-

Apicurio security requirements

-

For more details, see Generate Kuadrant AuthPolicy object from OpenAPI 3.

-

Generate a RateLimitPolicy

-

To generate a RateLimitPolicy for the API, add the following x-kuadrant block to a path in your API definition, replacing values to match your API details.

-
paths:
-    /:
-        x-kuadrant:
-            backendRefs:
-                -
-                    name: petstore
-                    namespace: petstore
-                    port: 8080
-            rate_limit:
-                rates:
-                    -
-                        limit: 10
-                        duration: 10
-                        unit: second
-
-

When added, Apicurio Studio will display the following update in the VENDOR-EXTENSiONS section for that specific path:

-

Apicurio RateLimitPolicy Vendor Extension

-

For more details, see Generate Kuadrant RateLimitPolicy object from OpenAPI 3.

-

Additional resources

- - - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/openapi-kuadrant-extensions/index.html b/dev/kuadrantctl/doc/openapi-kuadrant-extensions/index.html deleted file mode 100644 index 161cac2b..00000000 --- a/dev/kuadrantctl/doc/openapi-kuadrant-extensions/index.html +++ /dev/null @@ -1,1430 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - OpenAPI 3.0.x Kuadrant extensions - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

OpenAPI 3.0.x Kuadrant extensions

-

This reference information shows examples of how to add Kuadrant extensions at the root, path, or operation level in an OpenAPI 3.0.x definition.

-

Root-level Kuadrant extension

-

You can add a Kuadrant extension at the root level of an OpenAPI definition. The following example shows an extension added for a petstore app:

-
x-kuadrant:
-  route:  ## HTTPRoute metadata
-    name: "petstore"
-    namespace: "petstore"
-    labels:  ## map[string]string
-      deployment: petstore
-    hostnames:  ## []gateway.networking.k8s.io/v1beta1.Hostname
-
-      - example.com
-    parentRefs:  ## []gateway.networking.k8s.io/v1beta1.ParentReference
-      - name: apiGateway
-        namespace: gateways
-
-

Path-level Kuadrant extension

-

You can add a Kuadrant extension at the path level of an OpenAPI definition. -This configuration at the path level is the default when there is no operation-level configuration. -The following example shows an extension added for a /cat path:

-
paths:
-  /cat:
-    x-kuadrant:  ## Path-level Kuadrant extension
-      disable: true  ## Remove from the HTTPRoute. Optional. Default: false
-      pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact
-      backendRefs:  ## Backend references to be included in the HTTPRoute. []gateway.networking.k8s.io/v1beta1.HTTPBackendRef. Optional.
-
-        - name: petstore
-          port: 80
-          namespace: petstore
-      rate_limit:  ## Rate limit configuration. Optional.
-        rates:   ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate
-          - limit: 1
-            duration: 10
-            unit: second
-        counters:   ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.CountextSelector
-          - auth.identity.username
-        when:   ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition
-          - selector: metadata.filter_metadata.envoy\.filters\.http\.ext_authz.identity.userid
-            operator: eq
-            value: alice
-
-

Operation-level Kuadrant extension

-

You can add a Kuadrant extension at the operation level of an OpenAPI definition. This extension uses the same schema as the path-level Kuadrant extension. The following example shows an extension added for a get operation:

-
paths:
-  /cat:
-    get:
-      x-kuadrant:  ## Operation-level Kuadrant extension
-        disable: true  ## Remove from the HTTPRoute. Optional. Default: path level "disable" value.
-        pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact.
-        backendRefs:  ## Backend references to be included in the HTTPRoute. Optional.
-
-          - name: petstore
-            port: 80
-            namespace: petstore
-        rate_limit:  ## Rate limit configuration. Optional.
-          rates:   ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate
-            - limit: 1
-              duration: 10
-              unit: second
-          counters:   ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.CountextSelector
-            - auth.identity.username
-          when:   ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition
-            - selector: metadata.filter_metadata.envoy\.filters\.http\.ext_authz.identity.userid
-              operator: eq
-              value: alice
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/doc/openapi-openshift-dev-spaces/index.html b/dev/kuadrantctl/doc/openapi-openshift-dev-spaces/index.html deleted file mode 100644 index 3618c573..00000000 --- a/dev/kuadrantctl/doc/openapi-openshift-dev-spaces/index.html +++ /dev/null @@ -1,1823 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Integrating Kuadrant OAS extensions with Red Hat OpenShift Dev Spaces - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Integrating Kuadrant OAS extensions with Red Hat OpenShift Dev Spaces

-

OpenAPI extensions enhance the standard OpenAPI specification by adding custom functionality. Kuadrant OpenAPI extensions are identified by the x-kuadrant prefix. You can use OpenAPI extensions to integrate Kuadrant policies directly into your API definitions.

-

Red Hat OpenShift Dev Spaces provides a browser-based, cloud-native IDE that supports rapid and decentralized development in container-based environments. This tutorial demonstrates how to use OpenShift Dev Spaces to modify an OpenAPI definition by incorporating Kuadrant policies, and then use the kuadrantctl CLI to create Kubernetes resources for both Gateway API and Kuadrant.

-

Prerequisites

-
    -
  • -

    You must have access to one of the following Dev Spaces instances:

    -
  • -
  • -

    A self-hosted OpenShift Dev Spaces instance.

    -
  • -
  • An OpenShift Dev Spaces instance provided by the Red Hat Developer Sandbox.
  • -
-

Procedure

-

Step 1 - Setting up your workspace

-

Create a workspace in Dev Spaces for your project as follows:

-
    -
  1. Fork the following repository: https://github.com/Kuadrant/blank-petstore.
  2. -
  3. In Dev Spaces, select Create Workspace, and enter the URL of your forked repository. For example: https://github.com/<your-username>/blank-petstore.git.
  4. -
  5. Click Create & Open.
  6. -
-

Step 2 - Configuring VS Code in Dev Spaces

-

For this tutorial, you will perform the following tasks:

-
    -
  • Install kuadrantctl in your workspace to demonstrate Kubernetes resource generation from your modified OpenAPI definition.
  • -
  • Optional: Configure Git with your username and email to enable pushing changes back to your repository.
  • -
-

Install the kuadrantctl CLI

-

To install kuadrantctl in your Dev Spaces workspace, enter the following command:

-
curl -sL "https://github.com/kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-amd64.tar.gz" | tar xz -C /home/user/.local/bin
-
-

This command installs kuadrantctl in /home/user/.local/bin, which is included in the container's $PATH by default.

-

Optional: Configuring Git

-

If you plan to push changes back to your repository, configure your Git username and email as follows:

-
git config --global user.email "foo@example.com"
-git config --global user.name "Foo Example"
-
-

Step 3 - Adding Kuadrant policies to your OpenAPI definition

-

After creating your workspace, Dev Spaces will launch VS Code loaded with your forked repository. Navigate to the openapi.yaml file in the sample app to begin modifications.

-

Kuadrant policies overview

-

You will enhance your API definition by applying Kuadrant policies to the following endpoints:

-
    -
  • /pet/findByStatus
  • -
  • /user/login
  • -
  • /store/inventory
  • -
-

In this tutorial, you will add Kuadrant policies to your API definition as follows:

-
    -
  • Generate an HTTPRoute to expose these three routes for an existing Gateway.
  • -
  • Add API key authentication for the /user/login route, using a Kuadrant AuthPolicy and OAS securitySchemes.
  • -
  • Add a Kuadrant RateLimitPolicy to the /store/inventory endpoint, to limit the amount of requests this endpoint can receive.
  • -
-

Defining a Gateway

-

Use the x-kuadrant extension in the root level to specify a Gateway. This information will be used to generate HTTPRoutes at the path level. For example:

-
x-kuadrant:
-  route:  ## HTTPRoute metadata
-    name: "petstore"
-    namespace: "petstore"
-    labels:  ## map[string]string
-      deployment: petstore
-    hostnames:  ## []gateway.networking.k8s.io/v1beta1.Hostname
-
-      - example.com
-    parentRefs:  ## []gateway.networking.k8s.io/v1beta1.ParentReference
-      - name: apiGateway
-        namespace: gateways
-
-

Specifying HTTPRoutes for each path

-

For each path, add an x-kuadrant extension with backendRefs to link your routes to your paths as follows:

-
  /pet/findByStatus:
-    x-kuadrant:
-      backendRefs:
-
-      - name: petstore
-        namespace: petstore
-        port: 8080
-    get:
-      # ...
-
-
  /user/login:
-    x-kuadrant:
-      backendRefs:
-
-      - name: petstore
-        namespace: petstore
-        port: 8080
-    get:
-      # ...
-
-
  /store/inventory:
-    x-kuadrant:
-      backendRefs:
-
-      - name: petstore
-        namespace: petstore
-        port: 8080
-    get:
-      # ...
-
-

Note: The x-kuadrant extension at the path level applies to all HTTP methods defined in the path. For method-specific policies, move the extension inside the relevant HTTP method block, for example, get or post.

-

Implementing AuthPolicy and security schemes

-

To secure the /user/login endpoint with API key authentication, use the following configuration:

-
  /user/login:
-    # ...
-    get:
-      security:
-
-      - api_key: []
-
-
components:
-  schemas:
-    # ...
-  securitySchemes:
-    api_key:
-      type: apiKey
-      name: api_key
-      in: header
-
-

This configuration generates an AuthPolicy that references an API key stored in a labeled Secret:

-

apiVersion: v1
-kind: Secret
-metadata:
-  name: petstore-api-key
-  namespace: petstore
-  labels:
-    authorino.kuadrant.io/managed-by: authorino
-    kuadrant.io/apikeys-by: api_key
-stringData:
-  api_key: secret
-type: Opaque
-
-For simplicity, this example uses a simple, static API key for your app.

-

Applying a RateLimitPolicy to an endpoint

-

To enforce rate limiting on the /store/inventory endpoint, add the following x-kuadrant extension:

-
  /store/inventory:
-    get:
-      # ...
-      x-kuadrant:
-        backendRefs:
-          # ...
-        rate_limit:
-          rates:
-
-          - limit: 10
-            duration: 10
-            unit: second
-
-

This limits to 10 requests every 10 seconds for the /store/inventory endpoint.

-

Step 4 - Generate Kubernetes resources by using kuadrantctl

-

With your extensions in place, you can use kuadrantctl to generate the follollowing Kubernetes resources:

-
    -
  • An HTTPRoute for your petstore app for each of your endpoints.
  • -
  • An AuthPolicy with a simple, static API key from a secret for the /user/login endpoint.
  • -
  • A RateLimitPolicy with a rate limit of 10 requests every 10 seconds for the /store/inventory endpoint.
  • -
-

In Dev Spaces, select ☰ > Terminal > New Terminal, and run the following commands:

-
Generate an HTTPRoute
-
kuadrantctl generate gatewayapi httproute --oas openapi.yaml
-
-

This command outputs the following HTTPRoute:

-
kind: HTTPRoute
-apiVersion: gateway.networking.k8s.io/v1beta1
-metadata:
-  name: petstore
-  namespace: petstore
-  creationTimestamp: null
-  labels:
-    deployment: petstore
-spec:
-  parentRefs:
-
-    - namespace: gateways
-      name: apiGateway
-  hostnames:
-    - example.com
-  rules:
-    - matches:
-        - path:
-            type: Exact
-            value: /api/v3/pet/findByStatus
-          method: GET
-      backendRefs:
-        - name: petstore
-          namespace: petstore
-          port: 8080
-    - matches:
-        - path:
-            type: Exact
-            value: /api/v3/store/inventory
-          method: GET
-      backendRefs:
-        - name: petstore
-          namespace: petstore
-          port: 8080
-    - matches:
-        - path:
-            type: Exact
-            value: /api/v3/user/login
-          method: GET
-      backendRefs:
-        - name: petstore
-          namespace: petstore
-          port: 8080
-status:
-  parents: null
-
-
Generate an AuthPolicy
-
kuadrantctl generate kuadrant authpolicy --oas openapi.yaml
-
-

This command outputs the following AuthPolicy:

-
apiVersion: kuadrant.io/v1beta2
-kind: AuthPolicy
-metadata:
-  name: petstore
-  namespace: petstore
-  creationTimestamp: null
-  labels:
-    deployment: petstore
-spec:
-  targetRef:
-    group: gateway.networking.k8s.io
-    kind: HTTPRoute
-    name: petstore
-    namespace: petstore
-  routeSelectors:
-
-    - matches:
-        - path:
-            type: Exact
-            value: /api/v3/user/login
-          method: GET
-  rules:
-    authentication:
-      GETuserlogin_api_key:
-        credentials:
-          customHeader:
-            name: api_key
-        apiKey:
-          selector:
-            matchLabels:
-              kuadrant.io/apikeys-by: api_key
-        routeSelectors:
-          - matches:
-              - path:
-                  type: Exact
-                  value: /api/v3/user/login
-                method: GET
-status: {}
-
-
Generate a RateLimitPolicy
-
kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml
-
-

This command outputs the following RateLimitPolicy:

-
apiVersion: kuadrant.io/v1beta2
-kind: RateLimitPolicy
-metadata:
-  name: petstore
-  namespace: petstore
-  creationTimestamp: null
-  labels:
-    deployment: petstore
-spec:
-  targetRef:
-    group: gateway.networking.k8s.io
-    kind: HTTPRoute
-    name: petstore
-    namespace: petstore
-  limits:
-    GETstoreinventory:
-      routeSelectors:
-
-        - matches:
-            - path:
-                type: Exact
-                value: /api/v3/store/inventory
-              method: GET
-      rates:
-        - limit: 10
-          duration: 10
-          unit: second
-status: {}
-
-

Step 5 - Applying resources to the app

-
-

Note: By default, the oc and kubectl commands in Dev Spaces target the cluster running Dev Spaces. If you want to apply resources to another cluster, you must log in with oc or kubectl to another cluster, and pass a different --context to these commands to apply resources to another cluster.

-
-

You can now apply these policies to a running app by using kubectl or oc. If Dev Spaces is running on a cluster where Kuadrant is also installed, you can apply these resources as follows:

-
kuadrantctl generate gatewayapi httproute --oas openapi.yaml | kubectl apply -f -
-kuadrantctl generate kuadrant authpolicy --oas openapi.yaml | kubectl apply -f -
-kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | kubectl apply -f -
-
-

Alternatively, you can use kuadrantctl as part of a CI/CD pipeline. For more details, see the kuadrantctl CI/CD guide.

-

If you completed the optional Git configuration step, you can enter git commit to commit the these changes and push them to your fork.

-

Additional resources

-

For more details, see the following documentation on using x-kuadrant OAS extensions with kuadrantctl:

- - - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/kuadrantctl/index.html b/dev/kuadrantctl/index.html deleted file mode 100644 index f293f65b..00000000 --- a/dev/kuadrantctl/index.html +++ /dev/null @@ -1,1798 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - kuadrantctl - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

kuadrantctl

-

License

-

kuadrantctl is a CLI tool for managing Kuadrant configurations and resources.

-

Installing

-

kuadrantctl can be installed either by downloading pre-compiled binaries or by compiling from source. For most users, downloading the binary is the easiest and recommended method.

-

Installing Pre-compiled Binaries

-
    -
  1. Download the latest binary for your platform from the kuadrantctl Releases page.
  2. -
  3. Unpack the binary.
  4. -
  5. Move it to a directory in your $PATH so that it can be executed from anywhere.
  6. -
-

Compiling from Source

-

If you prefer to compile from source or are contributing to the project, you can install kuadrantctl using make install. This method requires Golang 1.21 or newer.

-

It is possible to use the make target install to compile from source. From root of the repository, run

-
make install
-
-

This will compile kuadrantctl and install it in the bin directory at root of directory. It will also ensure the correct version of the binary is displayed . It can be ran using ./bin/kuadrantctl .

-

Usage

-

Below is a high-level overview of its commands, along with links to detailed documentation for more complex commands.

-

General Syntax

-
kuadrantctl [command] [subcommand] [flags]
-
-

Commands Overview

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CommandDescription
completionGenerate autocompletion scripts for the specified shell
generateCommands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications
topologyCommand related to Kuadrant topology
helpHelp about any command
versionPrint the version number of kuadrantctl
-

Flags

- - - - - - - - - - - - - - - - - -
FlagDescription
-h, --helpHelp for kuadrantctl
-v, --verboseEnable verbose output
-

Commands Detail

-

completion

-

Generate an autocompletion script for the specified shell.

- - - - - - - - - - - - - - - - - - - - - - - - - -
SubcommandDescription
bashGenerate script for Bash
fishGenerate script for Fish
powershellGenerate script for PowerShell
zshGenerate script for Zsh
-

generate

-

Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications.

- - - - - - - - - - - - - - - - - -
SubcommandDescription
gatewayapiGenerate Gateway API resources
kuadrantGenerate Kuadrant resources
-
generate gatewayapi
-

Generate Gateway API resources from an OpenAPI 3.x specification

- - - - - - - - - - - - - - - -
SubcommandDescriptionFlags
httprouteGenerate Gateway API HTTPRoute from OpenAPI 3.0.X--oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o Output format: 'yaml' or 'json'. (default "yaml")
-

topology

-

Export and visualize kuadrant topology

-

Usage

-
$ kuadrantctl topology -h
-Export and visualize kuadrant topology
-
-Usage:
-  kuadrantctl topology [flags]
-
-Flags:
-  -d, --dot string         Graphviz DOT output file
-  -h, --help               help for topology
-  -n, --namespace string   Topology's namespace (default "kuadrant-system")
-  -o, --output string      SVG image output file
-
-Global Flags:
-  -v, --verbose   verbose output
-
-
generate kuadrant
-

Generate Kuadrant resources from an OpenAPI 3.x specification

- - - - - - - - - - - - - - - - - - - - -
SubcommandDescriptionFlags
authpolicyGenerate a Kuadrant AuthPolicy from an OpenAPI 3.0.x specification--oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o Output format: 'yaml' or 'json'. (default "yaml")
ratelimitpolicyGenerate Kuadrant RateLimitPolicy from an OpenAPI 3.0.x specification--oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o Output format: 'yaml' or 'json'. (default "yaml")
-

version

-

Print the version number of kuadrantctl.

-

No additional flags or subcommands.

-

Additional Guides

-

Generating Gateway API HTTPRoute Objects

-
    -
  • Generates Gateway API HTTPRoute objects from an OpenAPI Specification (OAS) 3.x.
  • -
  • Supports reading from a file, URL, or stdin.
  • -
  • Example usages and more information can be found in the detailed guide.
  • -
-

Generating Kuadrant AuthPolicy Objects

-
    -
  • Generates Kuadrant AuthPolicy objects for managing API authentication.
  • -
  • Supports openIdConnect and apiKey types from the OpenAPI Security Scheme Object.
  • -
  • Example usages and more information can be found in the detailed guide.
  • -
-

Generating Kuadrant RateLimitPolicy Objects

-
    -
  • Generates Kuadrant RateLimitPolicy objects for managing API rate limiting.
  • -
  • Supports reading from a file, URL, or stdin.
  • -
  • Example usages and more information can be found in the detailed guide.
  • -
-

For more detailed information about each command, including options and usage examples, use kuadrantctl [command] --help.

-

Using with GitHub Actions

-
- name: Install kuadrantctl
-  uses: jaxxstorm/action-install-gh-release@v1.10.0
-  with: # Grab the latest version
-    repo: Kuadrant/kuadrantctl
-
-

Commands

- -

Contributing

-

The Development guide describes how to build the kuadrantctl CLI and how to test your changes before submitting a patch or opening a PR.

-

Licensing

-

This software is licensed under the Apache 2.0 license.

-

See the LICENSE and NOTICE files that should have been provided along with this software for details.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/custom-image/index.html b/dev/limitador-operator/doc/custom-image/index.html deleted file mode 100644 index 378049d0..00000000 --- a/dev/limitador-operator/doc/custom-image/index.html +++ /dev/null @@ -1,1387 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Custom Image - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Custom Image

-

Currently, the limitador image being used in the deployment is read from different sources with some order of precedence:

-
    -
  • If Limtador CR's spec.image is set -> image = ${spec.image}
  • -
  • If Limtador CR's spec.version is set -> image = quay.io/kuadrant/limitador:${spec.version} (note the repo is hardcoded)
  • -
  • if RELATED_IMAGE_LIMITADOR env var is set -> image = $RELATED_IMAGE_LIMITADOR
  • -
  • else: hardcoded to quay.io/kuadrant/limitador:latest
  • -
-

The spec.image field is not meant to be used in production environments. -It is meant to be used for dev/testing purposes. -The main drawback of the spec.image usage is that upgrades cannot be supported as the -limitador operator cannot ensure the operation to be safe.

-
---
-apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-instance-1
-spec:
-  image: example.com/myorg/limitador-repo:custom-image-v1
-EOF
-
-

Pull an Image from a Private Registry

-

To pull an image from a private container image registry or repository, you need to provide credentials.

-

Create a Secret of type kubernetes.io/dockerconfigjson by providing credentials. -For example, using kubectl tool with the following command line:

-
kubectl create secret docker-registry regcred --docker-server=<your-registry-server> --docker-username=<your-name> --docker-password=<your-pword>
-
-

That will create a secret named regcred.

-

Deploy limitador instance with the imagePullSecrets field having a reference to the regcred.

-
---
-apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-instance-1
-spec:
-  image: example.com/myorg/limitador-repo:custom-image-v1
-  imagePullSecrets:
-
-  - name: regcred
-
-
-

NOTE: It is mandatory that the secret and limitador CR are created in the same namespace.

-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/development/index.html b/dev/limitador-operator/doc/development/index.html deleted file mode 100644 index bfd6dbe5..00000000 --- a/dev/limitador-operator/doc/development/index.html +++ /dev/null @@ -1,1725 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Development Guide - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Development Guide

-

Technology stack required for development

- -

Build

-
make
-
-

Run locally

-

You need an active session open to a kubernetes cluster.

-

Optionally, run kind with local-env-setup.

-
make local-env-setup
-
-

Then, run the operator locally

-
make run
-
-

Deploy the operator in a deployment object

-
make local-setup
-
-

Deploy the operator using OLM

-

You can deploy the operator using OLM just running a few commands. -No need to build any image. Kuadrant engineering team provides latest and -released version tagged images. They are available in -the Quay.io/Kuadrant image repository.

-

Create kind cluster

-
make kind-create-cluster
-
-

Deploy OLM system

-
make install-olm
-
-

Deploy the operator using OLM. The make deploy-catalog target accepts the following variables:

- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
CATALOG_IMGCatalog image URLquay.io/kuadrant/limitador-operator-catalog:latest
-
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]
-
-

Build custom OLM catalog

-

If you want to deploy (using OLM) a custom limitador operator, you need to build your own catalog.

-

Build operator bundle image

-

The make bundle target accepts the following variables:

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault valueNotes
IMGOperator image URLquay.io/kuadrant/limitador-operator:latest
VERSIONBundle version0.0.0
RELATED_IMAGE_LIMITADORLimitador bundle URLquay.io/kuadrant/limitador:latestLIMITADOR_VERSION var could be use to build this URL providing the tag
CHANNELSBundle channels used in the bundle, comma separatedalpha
DEFAULT_CHANNELThe default channel used in the bundlealpha
-
    -
  • Build the bundle manifests
  • -
-
make bundle [IMG=quay.io/kuadrant/limitador-operator:latest] \
-            [VERSION=0.0.0] \
-            [RELATED_IMAGE_LIMITADOR=quay.io/kuadrant/limitador:latest] \
-            [CHANNELS=alpha] \
-            [DEFAULT_CHANNEL=alpha]
-
-
    -
  • Build the bundle image from the manifests
  • -
- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
BUNDLE_IMGOperator bundle image URLquay.io/kuadrant/limitador-operator-bundle:latest
-
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]
-
-
    -
  • Push the bundle image to a registry
  • -
- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
BUNDLE_IMGOperator bundle image URLquay.io/kuadrant/limitador-operator-bundle:latest
-
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]
-
-

Build custom catalog

-

The catalog format will be File-based Catalog.

-

Make sure all the required bundles are pushed to the registry. It is required by the opm tool.

-

The make catalog target accepts the following variables:

- - - - - - - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
BUNDLE_IMGOperator bundle image URLquay.io/kuadrant/limitador-operator-bundle:latest
DEFAULT_CHANNELCatalog default channelalpha
-
make catalog [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] [DEFAULT_CHANNEL=alpha]
-
-
    -
  • Build the catalog image from the manifests
  • -
- - - - - - - - - - - - - - - -
Makefile VariableDescriptionDefault value
CATALOG_IMGOperator catalog image URLquay.io/kuadrant/limitador-operator-catalog:latest
-
make catalog-build [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]
-
-
    -
  • Push the catalog image to a registry
  • -
-
make catalog-push [CATALOG_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]
-
-

You can try out your custom catalog image following the steps of the -Deploy the operator using OLM section.

-

Cleaning up

-
make local-cleanup
-
-

Run tests

-

Unittests

-
make test-unit
-
-

Optionally, add TEST_NAME makefile variable to run specific test

-
make test-unit TEST_NAME=TestConstants
-
-

or even subtest

-
make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal
-
-

Integration tests

-

You need an active session open to a kubernetes cluster.

-

Optionally, run local cluster with kind

-
make local-env-setup
-
-

Run integration tests

-
make test-integration
-
-

All tests

-

You need an active session open to a kubernetes cluster.

-

Optionally, run local cluster with kind

-
make local-env-setup
-
-

Run all tests

-
make test
-
-

Lint tests

-
make run-lint
-
-

(Un)Install Limitador CRD

-

You need an active session open to a kubernetes cluster.

-

Remove CRDs

-
make uninstall
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/logging/index.html b/dev/limitador-operator/doc/logging/index.html deleted file mode 100644 index 4335a21c..00000000 --- a/dev/limitador-operator/doc/logging/index.html +++ /dev/null @@ -1,1343 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Logging - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Logging

-

The limitador operator outputs 3 levels of log messages: (from lowest to highest level)

-
    -
  1. debug
  2. -
  3. info (default)
  4. -
  5. error
  6. -
-

info logging is restricted to high-level information. Actions like creating, deleting or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.

-

Only debug logging will include processing details.

-

To configure the desired log level, set the environment variable LOG_LEVEL to one of the supported values listed above. Default log level is info.

-

Apart from log level, the controller can output messages to the logs in 2 different formats:

-
    -
  • production (default): each line is a parseable JSON object with properties {"level":string, "ts":int, "msg":string, "logger":string, extra values...}
  • -
  • development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\t<log-level>\t<logger>\t<message>\t{extra-values-as-json}
  • -
-

To configure the desired log mode, set the environment variable LOG_MODE to one of the supported values listed above. Default log mode is production.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/rate-limit-headers/index.html b/dev/limitador-operator/doc/rate-limit-headers/index.html deleted file mode 100644 index 0fb64add..00000000 --- a/dev/limitador-operator/doc/rate-limit-headers/index.html +++ /dev/null @@ -1,1345 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Rate Limit Headers - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Rate Limit Headers

-

It enables RateLimit Header Fields for HTTP as specified in -Rate Limit Headers Draft

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  rateLimitHeaders: DRAFT_VERSION_03
-
-

Current valid values are:

- -

By default, when spec.rateLimitHeaders is null, --rate-limit-headers command line arg is not -included in the limitador's deployment.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/resource-requirements/index.html b/dev/limitador-operator/doc/resource-requirements/index.html deleted file mode 100644 index f2881e5b..00000000 --- a/dev/limitador-operator/doc/resource-requirements/index.html +++ /dev/null @@ -1,1434 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Resource Requirements - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Resource Requirements

-

The default resource requirement for Limitador deployments is specified in Limitador v1alpha1 API reference -and will be applied if the resource requirement is not set in the spec.

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  listener:
-    http:
-      port: 8080
-    grpc:
-      port: 8081
-  limits:
-
-    - conditions: ["get_toy == 'yes'"]
-      max_value: 2
-      namespace: toystore-app
-      seconds: 30
-      variables: []  
-
- - - - - - - - - - - - - - - - - - - - - -
Fieldjson/yaml fieldTypeRequiredDefault valueDescription
ResourceRequirementsresourceRequirements*corev1.ResourceRequirementsNo{"limits": {"cpu": "500m","memory": "64Mi"},"requests": {"cpu": "250m","memory": "32Mi"}}Limitador deployment resource requirements
-

Example with resource limits

-

The resource requests and limits for the deployment can be set like the following:

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  listener:
-    http:
-      port: 8080
-    grpc:
-      port: 8081
-  limits:
-
-    - conditions: ["get_toy == 'yes'"]
-      max_value: 2
-      namespace: toystore-app
-      seconds: 30
-      variables: []
-  resourceRequirements:
-    limits:
-      cpu: 200m
-      memory: 400Mi
-    requests:
-      cpu: 101m  
-      memory: 201Mi    
-
-

To specify the deployment without resource requests or limits, set an empty struct {} to the field: -

apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  listener:
-    http:
-      port: 8080
-    grpc:
-      port: 8081
-  limits:
-
-    - conditions: [ "get_toy == 'yes'" ]
-      max_value: 2
-      namespace: toystore-app
-      seconds: 30
-      variables: []
-  resourceRequirements: {}
-

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/storage/index.html b/dev/limitador-operator/doc/storage/index.html deleted file mode 100644 index e6972a9e..00000000 --- a/dev/limitador-operator/doc/storage/index.html +++ /dev/null @@ -1,1615 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Storage - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Storage

-

Limitador limits counters are stored in a backend storage. This is In contrast to the storage of -the limits themselves, which are always stored in ephemeral memory. Limitador's operator -supports several storage configurations:

-
    -
  • In-Memory: ephemeral and cannot be shared
  • -
  • Redis: Persistent (depending on the redis storage configuration) and can be shared
  • -
  • Redis Cached: Persistent (depending on the redis storage configuration) and can be shared
  • -
  • Disk: Persistent (depending on the underlying disk persistence capabilities) and cannot be shared
  • -
-

In-Memory

-

Counters are held in Limitador (ephemeral)

-

In-Memory is the default option defined by the Limitador's Operator.

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage: null
-
-

For any of those, one should store the URL of the Redis service, inside a K8s opaque -Secret.

-
apiVersion: v1
-kind: Secret
-metadata:
-  name: redisconfig
-stringData:
-  URL: redis://127.0.0.1/a # Redis URL of its running instance
-type: Opaque
-
-

Redis

-

Uses Redis to store counters.

-

Selected when spec.storage.redis is not null.

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage:
-    redis:
-      configSecretRef: # The secret reference storing the URL for Redis
-        name: redisconfig
-
-

The URL of the Redis service is provided inside a K8s opaque -Secret. -The secret is required to be in the same namespace as the Limitador CR.

-
apiVersion: v1
-kind: Secret
-metadata:
-  name: redisconfig
-stringData:
-  URL: redis://127.0.0.1/a # Redis URL of its running instance
-type: Opaque
-
-

Note: Limitador's Operator will only read the URL field of the secret.

-

Redis Cached

-

Uses Redis to store counters, with an in-memory cache.

-

Selected when spec.storage.redis-cached is not null.

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage:
-    redis-cached:
-      configSecretRef: # The secret reference storing the URL for Redis
-        name: redisconfig
-
-

The URL of the Redis service is provided inside a K8s opaque -Secret. -The secret is required to be in the same namespace as the Limitador CR. -

apiVersion: v1
-kind: Secret
-metadata:
-  name: redisconfig
-stringData:
-  URL: redis://127.0.0.1/a # Redis URL of its running instance
-type: Opaque
-

-

Note: Limitador's Operator will only read the URL field of the secret.

-

Additionally, caching options can be specified in the spec.storage.redis-cached.options field.

-

Options

- - - - - - - - - - - - - - - - - - - - - - - - - -
OptionDescription
batch-sizeSize of entries to flush in as single flush [default: 100]
flush-periodFlushing period for counters in milliseconds [default: 1000]
max-cachedMaximum amount of counters cached [default: 10000]
response-timeoutTimeout for Redis commands in milliseconds [default: 350]
-

For example:

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage:
-    redis-cached:
-      configSecretRef: # The secret reference storing the URL for Redis
-        name: redisconfig
-      options: # Every option is optional
-        batch-size: 50
-        max-cached: 5000
-
-

Disk

-

Counters are held on disk (persistent). -Kubernetes Persistent Volumes -will be used to store counters.

-

Selected when spec.storage.disk is not null.

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage:
-    disk: {}
-
-

Additionally, disk options can be specified in the spec.storage.disk.persistentVolumeClaim -and spec.storage.disk.optimize fields.

-

Persistent Volume Claim Options

-

spec.storage.disk.persistentVolumeClaim field is an object with the following fields.

- - - - - - - - - - - - - - - - - - - - - -
FieldDescription
storageClassNameStorageClass of the storage offered by cluster administrators [default: default storage class of the cluster]
resourcesThe minimum resources the volume should have. Resources will not take any effect when VolumeName is provided. This parameter is not updateable when the underlying PV is not resizable. [default: 1Gi]
volumeNameThe binding reference to the existing PersistentVolume backing this claim [default: null]
-

Example:

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage:
-    disk:
-      persistentVolumeClaim:
-        storageClassName: "customClass"
-        resources:
-          requests: 2Gi
-
-

Optimize

-

Defines the valid optimization option of the disk persistence type.

-

spec.storage.disk.optimize field is a string type with the following valid values:

- - - - - - - - - - - - - - - - - -
OptionDescription
throughputOptimizes for higher throughput. Default
diskOptimizes for disk usage
-

Example:

-
apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  storage:
-    disk:
-      optimize: disk
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/doc/tracing/index.html b/dev/limitador-operator/doc/tracing/index.html deleted file mode 100644 index b298c926..00000000 --- a/dev/limitador-operator/doc/tracing/index.html +++ /dev/null @@ -1,1353 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Tracing - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Tracing

-

Limitador offers distributed tracing enablement using the .spec.tracing CR configuration:

-
---
-apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  listener:
-    http:
-      port: 8080
-    grpc:
-      port: 8081
-  limits:
-
-    - conditions: ["get_toy == 'yes'"]
-      max_value: 2
-      namespace: toystore-app
-      seconds: 30
-      variables: []
-  verbosity: 3
-  tracing:
-    endpoint: rpc://my-otlp-collector:4317
-
-

Currently limitador only supports collectors using the OpenTelemetry Protocol with TLS disabled. The endpoint configuration option should contain the scheme, host and port of the service. The quantity and level of the information provided by the spans is configured via the verbosity argument.

-

Limitador tracing example

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador-operator/index.html b/dev/limitador-operator/index.html deleted file mode 100644 index b34d20bf..00000000 --- a/dev/limitador-operator/index.html +++ /dev/null @@ -1,1444 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Limitador Operator - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Limitador Operator

-

License -FOSSA Status -codecov

-

Overview

-

The Operator to manage Limitador deployments.

-

CustomResourceDefinitions

-
    -
  • Limitador, which defines a desired Limitador deployment.
  • -
-

Limitador CRD

-

Limitador v1alpha1 API reference

-

Example:

-
---
-apiVersion: limitador.kuadrant.io/v1alpha1
-kind: Limitador
-metadata:
-  name: limitador-sample
-spec:
-  listener:
-    http:
-      port: 8080
-    grpc:
-      port: 8081
-  limits:
-
-    - conditions: ["get_toy == 'yes'"]
-      max_value: 2
-      namespace: toystore-app
-      seconds: 30
-      variables: []
-
-

Features

- -

Contributing

-

The Development guide describes how to build the operator and -how to test your changes before submitting a patch or opening a PR.

-

Join us on the #kuadrant channel in the Kubernetes Slack workspace, -for live discussions about the roadmap and more.

-

Licensing

-

This software is licensed under the Apache 2.0 license.

-

See the LICENSE and NOTICE files that should have been provided along with this software for details.

-

FOSSA Status

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/LICENSE b/dev/limitador/LICENSE deleted file mode 100644 index 261eeb9e..00000000 --- a/dev/limitador/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. 
Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative 
Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/dev/limitador/doc/how-it-works/index.html b/dev/limitador/doc/how-it-works/index.html deleted file mode 100644 index 4d11dc05..00000000 --- a/dev/limitador/doc/how-it-works/index.html +++ /dev/null @@ -1,1419 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - How it works - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

How it works

- -

How it works

-

Limitador will increment counters for all Limits that apply, if any of these counter is above its Limit's -max_value the request will be considered to be rate limited. So think of it as if the most restrictive limit -configuration will apply.

-

Limitador will evaluate whether a Limit applies against its namespace, its conditions and whether all variables are -resolvable. The namespace for the descriptors is defined by the domain field from the -service.ratelimit.v3.RateLimitRequest. -For each matching Limit, its counter is increased and checked against the Limit max_value.

-

One example to illustrate:

-

Let's say we have one rate limit:

-
conditions: [ "descriptors[0].KEY_A == 'VALUE_A'" ]
-max_value: 1
-seconds: 60
-variables: []
-namespace: example.org
-
-

Limitador Server receives a request with one descriptor with two entries:

-
domain: example.org
-descriptors:
-
-  - entries:
-    - KEY_A: VALUE_A
-    - OTHER_KEY: OTHER_VALUE
-
-

The counter's condition all match. Then, the counter will be increased and the limit checked. -If the limit is exceeded, the request will be rejected with 429 Too Many Requests, -otherwise accepted.

-

Note that the counter is being activated even though it does not match all the entries of the -descriptor. The same rule applies for the variables field.

-

Conditions are CEL expressions evaluating to a bool value.

-

The variables field is a list of keys. -The matching rule is defined just as the existence of the list of descriptor entries with the -same key values. If variables is variables: ["descriptors[0].A", "descriptors[0].B", "descriptors[0].C]", -the limit will match if the first descriptor has at least three entries with the same A, B, C keys.

-

Few examples to illustrate.

-

Having the following descriptors:

-
domain: example.org
-descriptors:
-
-  - entries:
-    - KEY_A: VALUE_A
-    - OTHER_KEY: OTHER_VALUE
-
-

the following counters would not be activated.

-

conditions: [ "descriptors[0].KEY_B == 'VALUE_B'" ]
-max_value: 1
-seconds: 60
-variables: []
-namespace: example.org
-
-Reason: conditions key does not exist

-

conditions:
-
-  - "descriptors[0].KEY_A == 'VALUE_A'"
-  - "descriptors[0].OTHER_KEY == 'WRONG_VALUE'"
-max_value: 1
-seconds: 60
-variables: []
-namespace: example.org
-
-Reason: not all the conditions match

-

conditions: []
-max_value: 1
-seconds: 60
-variables: [ "descriptors[0].MY_VAR" ]
-namespace: example.org
-
-Reason: the variable name does not exist

-

conditions: [ "descriptors[0].KEY_B == 'VALUE_B'" ]
-max_value: 1
-seconds: 60
-variables: [ "descriptors[0].MY_VAR" ]
-namespace: example.org
-
-Reason: Both variables and conditions must match. In this particular case, only conditions match

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/doc/migrations/conditions/index.html b/dev/limitador/doc/migrations/conditions/index.html deleted file mode 100644 index d898d2eb..00000000 --- a/dev/limitador/doc/migrations/conditions/index.html +++ /dev/null @@ -1,1348 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - New condition syntax - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

New condition syntax

-

With limitador-server version 1.0.0 (and the limitador crate version 0.3.0), the syntax for conditions within -limit definitions has changed.

-

Note! This synthax has been deprecated as of version 2.0.0

-

Changes when working with Limitador Server versions 1.x

-

The new syntax

-

The new syntax formalizes what part of an expression is the identifier and which is the value to test against. -Identifiers are simple string value, while string literals are to be demarcated by single quotes (') or double quotes -(") so that foo == " bar" now makes it explicit that the value is to be prefixed with a space character.

-

A few remarks:

-
    -
  • Only string values are supported, as that's what they really are
  • -
  • There is no escape character sequence supported in string literals
  • -
  • A new operator has been added, !=
  • -
-

The issue with the deprecated syntax

-

The previous syntax wouldn't differentiate between values and the identifier, so that foo == bar was valid. In this -case foo was the identifier of the variable, while bar was the value to evaluate it against. Whitespaces before and -after the operator == would be equally important. SO that foo == bar would test for a foo variable being equal -to bar where the trailing whitespace after the identifier, and the one prefixing the value, would have been -evaluated.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/doc/server/configuration/index.html b/dev/limitador/doc/server/configuration/index.html deleted file mode 100644 index 5dfb46ef..00000000 --- a/dev/limitador/doc/server/configuration/index.html +++ /dev/null @@ -1,1883 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Limitador configuration - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Limitador configuration

-

Command line configuration

-

The preferred way of starting and configuring the Limitador server is using the command line:

-
Rate Limiting Server
-
-Usage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]
-
-STORAGES:
-  memory        Counters are held in Limitador (ephemeral)
-  disk          Counters are held on disk (persistent)
-  redis         Uses Redis to store counters
-  redis_cached  Uses Redis to store counters, with an in-memory cache
-
-Arguments:
-  <LIMITS_FILE>  The limit file to use
-
-Options:
-  -b, --rls-ip <ip>
-          The IP to listen on for RLS [default: 0.0.0.0]
-  -p, --rls-port <port>
-          The port to listen on for RLS [default: 8081]
-  -B, --http-ip <http_ip>
-          The IP to listen on for HTTP [default: 0.0.0.0]
-  -P, --http-port <http_port>
-          The port to listen on for HTTP [default: 8080]
-  -l, --limit-name-in-labels
-          Include the Limit Name in prometheus label
-      --tracing-endpoint <tracing_endpoint>
-          The host for the tracing service [default: ]
-  -v...
-          Sets the level of verbosity
-      --validate
-          Validates the LIMITS_FILE and exits
-  -H, --rate-limit-headers <rate_limit_headers>
-          Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]
-      --grpc-reflection-service
-          Enables gRPC server reflection service
-  -h, --help
-          Print help
-  -V, --version
-          Print version
-
-

The values used are authoritative over any environment variables independently set.

-

Limit definitions

-

The LIMITS_FILE provided is the source of truth for all the limits that will be enforced. The file location will be -monitored by the server for any changes and be hot reloaded. If the changes are invalid, they will be ignored on hot -reload, or the server will fail to start.

-

The LIMITS_FILE's format

-

When starting the server, you point it to a LIMITS_FILE, which is expected to be a yaml file with an array of -limit definitions, with the following format:

-
---
-"$schema": http://json-schema.org/draft-04/schema#
-type: object
-properties:
-  name:
-    type: string
-  namespace:
-    type: string
-  seconds:
-    type: integer
-  max_value:
-    type: integer
-  conditions:
-    type: array
-    items:
-
-      - type: string
-  variables:
-    type: array
-    items:
-      - type: string
-required:
-  - namespace
-  - seconds
-  - max_value
-  - conditions
-  - variables
-
-

Here is an example of such a limit definition:

-
- namespace: example.org
-  max_value: 10
-  seconds: 60
-  conditions:
-    - "descriptors[0].req_method == 'GET'"
-  variables:
-    - descriptors[0].user_id
-
-
    -
  • namespace namespaces the limit, will generally be the domain, see here
  • -
  • seconds is the duration for which the limit applies, in seconds: e.g. 60 is a span of time of one minute
  • -
  • max_value is the actual limit, e.g. 100 would limit to 100 requests
  • -
  • name lets the user optionally name the limit
  • -
  • variables is an array of variables, which once resolved, will be used to qualify counters for the limit, - e.g. api_key to limit per api keys
  • -
  • conditions is an array of conditions, which once evaluated will decide whether to apply the limit or not
  • -
-

condition syntax

-

Each condition is an expression producing a boolean value (true or false). All conditions must evaluate to -true for the limit to be applied on a request.

-

These predicates are CEL Expressions that operate on the context provided by the Limit itself (its id and name fields), along with the descriptors from Envoy's service.ratelimit.v3.RateLimitRequest, each of which is exposed as a List of Map with both keys and values as String.

-

Counter storages

-

Limitador will load all the limit definitions from the LIMITS_FILE and keep these in memory. To enforce these -limits, Limitador needs to track requests in the form of counters. There would be at least one counter per limit, but -that number grows when variables are used to qualify counters per some arbitrary values.

-

memory

-

As the name implies, Limitador will keep all counters in memory. This yields the best results in terms of latency as well as accuracy. By default, only up to 1000 "concurrent" counters will be kept around, evicting the oldest entries. "Concurrent" in this context means counters that need to exist at the "same time", based on the period of the limit, as "expired" counters are discarded.

-

This storage is ephemeral, as if the process is restarted, all the counters are lost and effectively "reset" all the -limits as if no traffic had been rate limited, which can be fine for short-lived limits, less for longer-lived ones.

-

redis

-

When you want persistence of your counters, such as for disaster recovery or across restarts, using redis will store the counters in a redis instance using the provided URL. Increments to individual counters are made within redis itself, providing accuracy over these, though races can occur when multiple Limitador servers are used against a single redis and using "stacked" limits (i.e. over different periods). Latency is also impacted, as it results in one additional hop to talk to redis and maintain the counters.

-

TLS Support

-

Connect to a redis instance using the rediss:// URL scheme.

-

To enable insecure mode, append #insecure at the end of the URL. For example:

-
limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure
-
-

Authentication

-

To enable authentication, use the username and password properties of the URL scheme. For example:

-
limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1
-
-

When the username is omitted, redis assumes the default user. For example:

-
limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1
-
-

Usage

-
Uses Redis to store counters
-
-Usage: limitador-server <LIMITS_FILE> redis <URL>
-
-Arguments:
-  <URL>  Redis URL to use
-
-Options:
-  -h, --help  Print help
-
-

redis_cached

-

In order to avoid some communication overhead to redis, redis_cached adds an in memory caching layer within the -Limitador servers. This lowers the latency, but sacrifices some accuracy as it will not only cache counters, but also -coalesce counters updates to redis over time. See this configuration option for more -information.

-

TLS Support

-

Connect to a redis instance using the rediss:// URL scheme.

-

To enable insecure mode, append #insecure at the end of the URL. For example:

-
limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure
-
-

Authentication

-

To enable authentication, use the username and password properties of the URL scheme. For example:

-
limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1
-
-

When the username is omitted, redis assumes the default user. For example:

-
limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1
-
-

Usage

-
Uses Redis to store counters, with an in-memory cache
-
-Usage: limitador-server <LIMITS_FILE> redis_cached [OPTIONS] <URL>
-
-Arguments:
-  <URL>  Redis URL to use
-
-Options:
-      --batch-size <batch>          Size of entries to flush in as single flush [default: 100]
-      --flush-period <flush>        Flushing period for counters in milliseconds [default: 1000]
-      --max-cached <max>            Maximum amount of counters cached [default: 10000]
-      --response-timeout <timeout>  Timeout for Redis commands in milliseconds [default: 350]
-  -h, --help                        Print help
-
-

disk

-

Disk storage using RocksDB. Counters are held on disk (persistent).

-
Counters are held on disk (persistent)
-
-Usage: limitador-server <LIMITS_FILE> disk [OPTIONS] <PATH>
-
-Arguments:
-  <PATH>  Path to counter DB
-
-Options:
-      --optimize <OPTIMIZE>  Optimizes either to save disk space or higher throughput [default: throughput] [possible values: throughput, disk]
-  -h, --help                 Print help
-
-

For an in-depth coverage of the different topologies supported and how they affect the behavior, see the -topologies' document.

-

Configuration using environment variables

-

The Limitador server has some options that can be configured with environment variables. These will override the -default values the server uses. Any argument used when starting the server will prevail over the -environment variables.

-

ENVOY_RLS_HOST

-
    -
  • Host where the Envoy RLS server listens.
  • -
  • Optional. Defaults to "0.0.0.0".
  • -
  • Format: string.
  • -
-

ENVOY_RLS_PORT

-
    -
  • Port where the Envoy RLS server listens.
  • -
  • Optional. Defaults to 8081.
  • -
  • Format: integer.
  • -
-

HTTP_API_HOST

-
    -
  • Host where the HTTP server listens.
  • -
  • Optional. Defaults to "0.0.0.0".
  • -
  • Format: string.
  • -
-

HTTP_API_PORT

-
    -
  • Port where the HTTP API listens.
  • -
  • Optional. Defaults to 8080.
  • -
  • Format: integer.
  • -
-

LIMITS_FILE

-
    -
  • YAML file that contains the limits to create when Limitador boots. If the -limits specified already have counters associated, Limitador will not delete them. -Changes to the file will be picked up by the running server.
  • -
  • Required. No default
  • -
  • Format: string, file path.
  • -
-

LIMIT_NAME_IN_PROMETHEUS_LABELS

-
    -
  • Enables using limit names as labels in Prometheus metrics. This is disabled by -default because for a few limits it should be fine, but it could become a -problem when defining lots of limits. See the caution note in the Prometheus -docs
  • -
  • Optional. Disabled by default.
  • -
  • Format: bool, set to "1" to enable.
  • -
-

TRACING_ENDPOINT

-
    -
  • The endpoint of the OTLP tracing collector (scheme://host:port).
  • -
  • Optional. Default to "" (tracing disabled)
  • -
  • Format: string
  • -
-

REDIS_LOCAL_CACHE_ENABLED

-
    -
  • Enables a storage implementation that uses Redis, but also caches some data in -memory. The idea is to improve throughput and latencies by caching the counters -in memory to reduce the number of accesses to Redis. To achieve that, this mode -sacrifices some rate-limit accuracy. This mode does two things:
      -
    • Batches counter updates. Instead of updating the counters on every -request, it updates them in memory and commits them to Redis in batches. The -flushing interval can be configured with the -REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS -env. The trade-off is that when running several instances of Limitador, -other instances will not become aware of the counter updates until they're -committed to Redis.
    • -
    • Caches counters. Instead of fetching the value of a counter every time -it's needed, the value is cached for a configurable period. The trade-off is -that when running several instances of Limitador, an instance will not -become aware of the counter updates other instances do while the value is -cached. When a counter is already at 0 (limit exceeded), it's cached until -it expires in Redis. In this case, no matter what other instances do, we -know that the quota will not be reestablished until the key expires in -Redis, so in this case, rate-limit accuracy is not affected. When a counter -has still some quota remaining the situation is different, that's why we can -tune for how long it will be cached. The formula is as follows: -MIN(ttl_in_redis/REDIS_LOCAL_CACHE_TTL_RATIO_CACHED_COUNTERS, -REDIS_LOCAL_CACHE_MAX_TTL_CACHED_COUNTERS_MS). -For example, let's image that the current TTL (time remaining until the -limit resets) in Redis for a counter is 10 seconds, and we set the ratio to -2, and the max time for 30s. In this case, the counter will be cached for 5s -(min(10/2, 30)). During those 5s, Limitador will not fetch the value of that -counter from Redis, so it will answer faster, but it will also miss the -updates done by other instances, so it can go over the limits in that 5s -interval.
    • -
    -
  • -
  • Optional. Disabled by default.
  • -
  • Format: set to "1" to enable.
  • -
  • Note: "REDIS_URL" needs to be set.
  • -
-

REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS

-
    -
  • Used to configure the maximum flushing period. See -REDIS_LOCAL_CACHE_ENABLED. This env only applies -when "REDIS_LOCAL_CACHE_ENABLED" == 1.
  • -
  • Optional. Defaults to 1000.
  • -
  • Format: integer. Duration in milliseconds.
  • -
-

REDIS_LOCAL_CACHE_BATCH_SIZE

-
    -
  • Used to configure the maximum number of counters to update in a flush. See -REDIS_LOCAL_CACHE_ENABLED. This env only applies -when "REDIS_LOCAL_CACHE_ENABLED" == 1.
  • -
  • Optional. Defaults to 100.
  • -
  • Format: integer.
  • -
-

REDIS_URL

-
    -
  • Redis URL. Required only when you want to use Redis to store the limits.
  • -
  • Optional. By default, Limitador stores the limits in memory and does not -require Redis.
  • -
  • Format: string, URL in the format of "redis://127.0.0.1:6379".
  • -
-

RUST_LOG

-
    -
  • Defines the log level.
  • -
  • Optional. Defaults to "error".
  • -
  • Format: enum: "debug", "error", "info", "warn", or "trace".
  • -
-

RATE_LIMIT_HEADERS

-
    -
  • Enables rate limit response headers. Only supported by the RLS server.
  • -
  • Optional. Defaults to "NONE".
  • -
  • Must be one of:
  • -
  • "NONE" - Does not add any additional headers to the http response.
  • -
  • "DRAFT_VERSION_03". Adds response headers per https://datatracker.ietf.org/doc/id/draft-polli-ratelimit-headers-03.html
  • -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/doc/topologies/index.html b/dev/limitador/doc/topologies/index.html deleted file mode 100644 index 5026779e..00000000 --- a/dev/limitador/doc/topologies/index.html +++ /dev/null @@ -1,1427 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Deployment topologies - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Deployment topologies

-

In-memory

-

Redis

-

Redis active-active storage

-

The RedisLabs version of Redis supports active-active -replication. -Limitador is compatible with that deployment mode, but there are a few things to -take into account regarding limit accuracy.

-

Considerations

-

With an active-active deployment, the data needs to be replicated between -instances. An update in an instance takes a short time to be reflected in the -other. That time lag depends mainly on the network speed between the Redis -instances, and it affects the accuracy of the rate-limiting performed by -Limitador because it can go over limits while the updates of the counters are -being replicated.

-

The impact of that greatly depends on the use case. With limits of a few -seconds, and a low number of hits, we could easily go over limits. On the other -hand, if we have defined limits with a high number of hits and a long period, -the effect will be basically negligible. For example, if we define a limit of -one hour, and we know that the data takes around one second to be replicated, -the accuracy loss is going to be negligible.

-

Set up

-

In order to try active-active replication, you can follow this tutorial from -RedisLabs.

-

Disk

-

Disk storage using RocksDB. Counters are held on disk (persistent).

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/index.html b/dev/limitador/index.html deleted file mode 100644 index d7723f38..00000000 --- a/dev/limitador/index.html +++ /dev/null @@ -1,1481 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Limitador - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Limitador

-

Limitador GH Workflow -docs.rs -Crates.io -Docker Repository on Quay -codecov -FOSSA Status

-

Limitador is a generic rate-limiter written in Rust. It can be used as a -library, or as a service. The service exposes HTTP endpoints to apply and observe -limits. Limitador can be used with Envoy because it also exposes a grpc service, on a different -port, that implements the Envoy Rate Limit protocol (v3).

- -

Limitador is under active development, and its API has not been stabilized yet.

-

Getting started

- -

Rust library

-

Add this to your Cargo.toml: -

[dependencies]
-limitador = { version = "0.3.0" }
-

-

For more information, see the README of the crate

-

Server

-

Run with Docker (replace latest with the version you want): -

docker run --rm --net=host -it quay.io/kuadrant/limitador:v1.0.0
-

-

Run locally: -

cargo run --release --bin limitador-server -- --help
-

-

Refer to the help message on how to start up the server. More information is available in the server's README.md

-

Development

-

Build

-
cargo build
-
-

Run the tests

-

Some tests need a redis deployed in localhost:6379. You can run it in Docker with: -

docker run --rm -p 6379:6379 -it redis
-

-

Then, run the tests:

-
cargo test --all-features
-
-

or you can run tests disabling the "redis storage" feature: -

cd limitador; cargo test --no-default-features
-

-

Contributing

-

Join us on the #kuadrant channel in the Kubernetes Slack workspace, -for live discussions about the roadmap and more.

-

License

-

Apache 2.0 License

-

FOSSA Status

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/limitador-server/docs/http_server_spec.json b/dev/limitador/limitador-server/docs/http_server_spec.json deleted file mode 100644 index 4a17d7ac..00000000 --- a/dev/limitador/limitador-server/docs/http_server_spec.json +++ /dev/null @@ -1,305 +0,0 @@ -{ - "swagger": "2.0", - "definitions": { - "CheckAndReportInfo": { - "type": "object", - "properties": { - "delta": { - "type": "integer", - "format": "int64" - }, - "namespace": { - "type": "string" - }, - "values": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "response_headers": { - "type": "string", - "enum": ["none", "DraftVersion03"] - } - }, - "required": [ - "delta", - "namespace", - "values" - ] - }, - "Counter": { - "type": "object", - "properties": { - "expires_in_seconds": { - "type": "integer", - "format": "int64" - }, - "limit": { - "type": "object", - "properties": { - "conditions": { - "type": "array", - "items": { - "type": "string" - } - }, - "max_value": { - "type": "integer", - "format": "int64" - }, - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "seconds": { - "type": "integer", - "format": "int64" - }, - "variables": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "required": [ - "conditions", - "max_value", - "namespace", - "seconds", - "variables" - ] - }, - "remaining": { - "type": "integer", - "format": "int64" - }, - "set_variables": { - "type": "object", - "additionalProperties": { - "type": "string" - } - } - }, - "required": [ - "limit", - "set_variables" - ] - }, - "Limit": { - "type": "object", - "properties": { - "conditions": { - "type": "array", - "items": { - "type": "string" - } - }, - "max_value": { - "type": "integer", - "format": "int64" - }, - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "seconds": { - "type": "integer", - "format": "int64" - }, - "variables": { - "type": "array", - 
"items": { - "type": "string" - } - } - }, - "required": [ - "conditions", - "max_value", - "namespace", - "seconds", - "variables" - ] - } - }, - "paths": { - "/check": { - "post": { - "responses": { - "200": { - "description": "OK", - "schema": {} - }, - "429": { - "description": "Too Many Requests" - }, - "500": { - "description": "Internal Server Error" - } - }, - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/CheckAndReportInfo" - } - } - ] - } - }, - "/check_and_report": { - "post": { - "responses": { - "200": { - "description": "OK", - "schema": {} - }, - "429": { - "description": "Too Many Requests" - }, - "500": { - "description": "Internal Server Error" - } - }, - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/CheckAndReportInfo" - } - } - ] - } - }, - "/counters/{namespace}": { - "get": { - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Counter" - } - } - }, - "429": { - "description": "Too Many Requests" - }, - "500": { - "description": "Internal Server Error" - } - }, - "parameters": [ - { - "in": "path", - "name": "namespace", - "required": true, - "type": "string" - } - ] - } - }, - "/limits/{namespace}": { - "get": { - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/Limit" - } - } - }, - "429": { - "description": "Too Many Requests" - }, - "500": { - "description": "Internal Server Error" - } - }, - "parameters": [ - { - "in": "path", - "name": "namespace", - "required": true, - "type": "string" - } - ] - }, - "delete": { - "responses": { - "200": { - "description": "OK", - "schema": {} - }, - "429": { - "description": "Too Many Requests" - }, - "500": { - "description": "Internal Server Error" - } - }, - "parameters": [ - { - "in": "path", - "name": "namespace", - "required": 
true, - "type": "string" - } - ] - } - }, - "/report": { - "post": { - "responses": { - "200": { - "description": "OK", - "schema": {} - }, - "429": { - "description": "Too Many Requests" - }, - "500": { - "description": "Internal Server Error" - } - }, - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/CheckAndReportInfo" - } - } - ] - } - }, - "/status": { - "get": { - "responses": { - "200": { - "description": "OK", - "schema": {} - } - } - } - } - }, - "info": { - "version": "1.0.0", - "title": "Limitador server endpoints" - } -} diff --git a/dev/limitador/limitador-server/index.html b/dev/limitador/limitador-server/index.html deleted file mode 100644 index 492e335c..00000000 --- a/dev/limitador/limitador-server/index.html +++ /dev/null @@ -1,1412 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Limitador (server) - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Limitador (server)

-

Docker Repository on Quay

-

By default, Limitador starts the HTTP server in localhost:8080, and the grpc -service that implements the Envoy Rate Limit protocol in localhost:8081. That -can be configured with these ENVs: ENVOY_RLS_HOST, ENVOY_RLS_PORT, -HTTP_API_HOST, and HTTP_API_PORT.

-

Or using the command line arguments:

-
Rate Limiting Server
-
-Usage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]
-
-STORAGES:
-  memory        Counters are held in Limitador (ephemeral)
-  disk          Counters are held on disk (persistent)
-  redis         Uses Redis to store counters
-  redis_cached  Uses Redis to store counters, with an in-memory cache
-
-Arguments:
-  <LIMITS_FILE>  The limit file to use
-
-Options:
-  -b, --rls-ip <ip>
-          The IP to listen on for RLS [default: 0.0.0.0]
-  -p, --rls-port <port>
-          The port to listen on for RLS [default: 8081]
-  -B, --http-ip <http_ip>
-          The IP to listen on for HTTP [default: 0.0.0.0]
-  -P, --http-port <http_port>
-          The port to listen on for HTTP [default: 8080]
-  -l, --limit-name-in-labels
-          Include the Limit Name in prometheus label
-  -v...
-          Sets the level of verbosity
-      --tracing-endpoint <tracing_endpoint>
-          The endpoint for the tracing service
-      --validate
-          Validates the LIMITS_FILE and exits
-  -H, --rate-limit-headers <rate_limit_headers>
-          Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]
-  -h, --help
-          Print help
-  -V, --version
-          Print version
-
-

When using environment variables, these will override the defaults, while environment variables are themselves overridden by the command line arguments provided. See the individual STORAGES help for more options relative to each of the storages.

-

The OpenAPI spec of the HTTP service is -here.

-

Limitador has to be started with a YAML file that has some limits defined. -There's an example file that allows 10 requests per minute and per user_id when the HTTP method is "GET" and 5 when it is a "POST". -You can run it with Docker (replace latest with the version you want): -

docker run --rm --net=host -it -v $(pwd)/examples/limits.yaml:/home/limitador/my_limits.yaml:ro quay.io/kuadrant/limitador:latest limitador-server /home/limitador/my_limits.yaml
-

-

You can also use the YAML file when running locally: -

cargo run --release --bin limitador-server ./examples/limits.yaml
-

-

If you want to use Limitador with Envoy, there's a minimal Envoy config for -testing purposes here. The config -forwards the "userid" header and the request method to Limitador. It assumes -that there's an upstream API deployed on port 1323. You can use -echo, for example.

-

Limitador has several options that can be configured via ENV. This -doc specifies them.

-

Limits storage

-

Limitador can store its limits and counters in-memory, disk or in Redis. In-memory is -faster, but the limits are applied per instance. When using Redis, multiple -instances of Limitador can share the same limits, but it's slower.

- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/centos-pod.yaml b/dev/limitador/limitador-server/kubernetes/centos-pod.yaml deleted file mode 100644 index 520c91f9..00000000 --- a/dev/limitador/limitador-server/kubernetes/centos-pod.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: centos - labels: - app: centos -spec: - containers: - - image: centos - command: - - "sleep" - - "604800" - imagePullPolicy: IfNotPresent - name: centos - restartPolicy: Always diff --git a/dev/limitador/limitador-server/kubernetes/index.html b/dev/limitador/limitador-server/kubernetes/index.html deleted file mode 100644 index e1972adc..00000000 --- a/dev/limitador/limitador-server/kubernetes/index.html +++ /dev/null @@ -1,1666 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Kubernetes - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Kubernetes

-

The purpose of this documentation is to deploy a sample application published via AWS ELB, that will be rate limited at infrastructure level, thanks to the use of the envoyproxy sidecar container, which will be in charge of contacting a ratelimit service (limitador), which will allow the request (or not) if it is within the permitted limits.

-

There are mainly two recommended way of using limitador in kubernetes:

-
    -
  1. There is an ingress based on envoyproxy that contacts with limitador ratelimit service before forwarding (or not) the request to the application
  2. -
  3. There is an envoyproxy sidecar container living in the application pod that contacts with limitador ratelimit service before forwarding (or not) the request to the main application container in the same pod
  4. -
-

In this example it will be described the second scenario (where there is an application with an envoyproxy sidecar container contacting to limitador service).

-
-

NOTE -
If you don't want to manually manage the sidecar container definitions on your deployments (harcoding the container spec, loading the envoy configuration from a configmap that requires a pod restart to reload possibles configuration changes...), you can use marin3r, a light weight envoy control plane that allows you to inject envoyproxy sidecar containers and dynamically consume configs from Kubernetes custom resources.

-
-

This is the network diagram of the deployed example:

-

Ratelimit

-

Components

-

In order to run that ratelimit test, you need to deploy a few components. Some of them are mandatory, and a few are optional:

-

Mandatory

-
    -
  • Application (a sample application deployment called kuard):
  • -
  • App has an envoyproxy sidecar container with its configuration file in a configmap, composed by:
      -
    • Cluster kuard points to main application container (127.0.0.1:8080)
    • -
    • Cluster kuard_ratelimit points to limitador headless service (limitador:8081)
    • -
    • Listener HTTP points to envoyproxy sidecar (0.0.0.0:38080)
    • -
    • When envoy contacts the ratelimit service, you can define a timeout, and if there is no response within that timeout (because the ratelimit service is overloaded, taking more time to process the request, or because the ratelimit service is down), you can choose from envoy to deny the request or pass it to the application. In this case, a 1s timeout is set, and if there is no answer within this 1 second, the request is passed to the application (failure_mode_deny: false), so we guarantee that the maximum overhead added by a non-working ratelimit service is 1 extra second to the final response time.
    • -
    -
  • -
  • -

    App service published with type: LoadBalancer, which creates an AWS ELB. This service has an annotation to enable proxy protocol on the AWS Load balancer in order to be able to keep the real client IP at envoy level (instead of the k8s node private IP), so it can be used to ratelimit per each real client IP if desired.

    -
  • -
  • -

    Ratelimit application (a deployment called limitador):

    -
  • -
  • Limitador Configmap with limits definition (1000 rps per hostname).
  • -
  • -

    Limitador headless service published on limitador:8081. It is important to use a headless service in order to balance correctly the traffic between limitador pods, otherwise GRPC connections are not well balanced.

    -
  • -
  • -

    Redis database to persist ratelimit configuration:

    -
  • -
  • Redis service
  • -
  • Redis statefulset with a persistent volume
  • -
-

Optional

-
    -
  • Centos pod:
  • -
  • Used to execute hey tool benchmarks from the cluster, so we ensure network latency does not affect the results. Actually, to achieve better results, this pod should be on another cluster (so the network is not shared between client and server) and be placed in the same Region (to reduce latency). The client could be a bottleneck for the performance test.
  • -
  • This centos is going to public AWS ELB to access the app, so simulating it is a normal client from the same Region
  • -
  • Prometheus monitoring and grafana dashboard resources
  • -
-

K8s deployment

-
    -
  • -

    Deploy the redis instance that will keep the limits for different limitador pods: -

    kubectl apply -f redis-service.yaml
    -kubectl apply -f redis-statefulset.yaml
    -

    -
  • -
  • -

    Deploy limitador application. It is important to create the configmap with limitador limits before the deployment, in order to load it from limitador pods. At the moment, if you update the limits configmap you need to restart the pods. Additionally, limitador has an API in order to load limits dynamically, but for simplicity for this test a configmap has been used: -

    kubectl apply -f limitador-config-configmap.yaml
    -kubectl apply -f limitador-service.yaml
    -kubectl apply -f limitador-deployment.yaml
    -

    -
  • -
  • -

    Deploy sample kuard application with the envoyproxy sidecar container (if you do any change on the envoy configmap, remember you need to restart app pods in order to reload the config): -

    kubectl apply -f kuard-envoy-config-configmap.yaml
    -kubectl apply -f kuard-service.yaml
    -kubectl apply -f kuard-deployment.yaml
    -

    -
  • -
  • -

    At this point you should see all pods running, and kuard pods should have 2 containers (the main kuard container, and the envoyproxy sidecar container):

     kubectl get pods
    -NAME                         READY   STATUS    RESTARTS   AGE
    -kuard-f859bb896-gmzxn        2/2     Running   0          7m
    -kuard-f859bb896-z95w8        2/2     Running   0          7m
    -limitador-68d494f54c-qv996   1/1     Running   0          8m
    -limitador-68d494f54c-zzmhn   1/1     Running   0          8m
    -redis-0                      1/1     Running   0          9m
    -

    -
  • -
  • -

    Now you should be able to access to kuard application using the load balancer DNS name: -

    ▶ kubectl get service kuard
    -NAME    TYPE           CLUSTER-IP       EXTERNAL-IP                                                              PORT(S)        AGE
    -kuard   LoadBalancer   172.30.117.198   a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com   80:31450/TCP   4m
    -

    -
  • -
  • -

    If you go to the browser and paste the EXTERNAL-IP, your request will follow the next workflow:

    -
  • -
  • The requests will go from your local machine through internet to the public AWS ELB where the app is published
  • -
  • Then it will go to the NodePort of your k8s cluster nodes
  • -
  • Once on a k8s node, it will go to kuard Service Virtual IP, and will arrive to an envoyproxy sidecar container inside kuard pod
  • -
  • Envoyproxy sidecar container will contact with limitador headless Service, to authorize the requests or not:
      -
    • If the request is authorized (within the configured limits), it will send the request to the app container (0.0.0.0:8080) in the same pod, and request will end up with a HTTP 200 response
    • -
    • If the request is limited (beyond the limits), request will end up with HTTP 429 response
    • -
    -
  • -
-

Monitoring

-

Both envoyproxy sidecar and limitador applications include built-in prometheus metrics.

-

Prometheus

-

In order to scrape that metrics within a prometheus-operator deployed in the cluster, you need to create a PodMonitor resource for every application: -

kubectl apply -f kuard-podmonitor.yaml
-kubectl apply -f limitador-podmonitor.yaml
-

-

Grafana dashboard

-

Then, if you have grafana deployed in the cluster, you can import a Kuadrant Limitador grafana dashboard that we have prepared, which includes:

-
    -
  • Kuard envoyproxy sidecar metrics (globally and per pod)
  • -
  • Limitador metrics (globally and per pod)
  • -
  • And for every deployed component (limitador, kuard, redis):
  • -
  • Number of pods (total, available, unavailable, pod restarts...)
  • -
  • CPU usage per pod
  • -
  • Memory usage per pod
  • -
  • Network usage per pod
  • -
-

Benchmarking

-
    -
  • In order to check that the ratelimit is working as expected, you can use any benchmarking tool, like hey
  • -
  • You can use if you want a centos pod (better to create it on a different cluster within the same Region): -
    kubectl apply -f centos-pod.yaml
    -
  • -
  • Connect to centos pod: -
    kubectl exec --stdin --tty centos -- /bin/bash
    -
  • -
  • And install hey with: -
    [root@centos /]# curl -sf https://gobinaries.com/rakyll/hey | sh
    -
  • -
  • Now you can execute the benchmark using the following scenario:
  • -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
ItemValue
TargetAWS ELB DNS Name
App pods2
Limitador pods2
Limits1.000 rps per hostname
Hey duration1 minute
Hey Traffic-c 60 -q 20 (around 1.200 rps)
-
    -
  • Theoretically:
  • -
  • It should let pass 1.000 requests, and limit 200 requests per second
  • -
  • It should let pass 60 * 1.000 = 60.000 requests, and limit 60 * 200 = 12.000 requests per minute
  • -
  • Each limitador pod should handle half of the traffic (500 rps OK, and 200 rps limited)
  • -
-
[root@centos /]# hey -z 60s -c 60 -q 20 "http://a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com"
-
-Summary:
-  Total:    60.0131 secs
-  Slowest:  0.1028 secs
-  Fastest:  0.0023 secs
-  Average:  0.0075 secs
-  Requests/sec: 1199.3721
-
-  Total data:   106581650 bytes
-  Size/request: 1480 bytes
-
-Response time histogram:
-  0.002 [1] |
-  0.012 [70626] |■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■■
-  0.022 [1291]  |■
-  0.032 [0] |
-  0.043 [0] |
-  0.053 [0] |
-  0.063 [0] |
-  0.073 [0] |
-  0.083 [0] |
-  0.093 [23]    |
-  0.103 [37]    |
-
-
-Latency distribution:
-  10% in 0.0053 secs
-  25% in 0.0063 secs
-  50% in 0.0073 secs
-  75% in 0.0085 secs
-  90% in 0.0096 secs
-  95% in 0.0102 secs
-  99% in 0.0139 secs
-
-Details (average, fastest, slowest):
-  DNS+dialup:   0.0001 secs, 0.0023 secs, 0.1028 secs
-  DNS-lookup:   0.0001 secs, 0.0000 secs, 0.0711 secs
-  req write:    0.0000 secs, 0.0000 secs, 0.0014 secs
-  resp wait:    0.0074 secs, 0.0023 secs, 0.0303 secs
-  resp read:    0.0000 secs, 0.0000 secs, 0.0049 secs
-
-Status code distribution:
-  [200] 60046 responses
-  [429] 11932 responses
-
-
    -
  • -

    We can see that:

    -
      -
    • Client could send 1199.3721rps (about 1200rps)
    • -
    • 60046 requests (about 60000) were OK (HTTP 200)
    • -
    • 11932 requests (about 12000) were limited (HTTP 429)
    • -
    • Average latency (since the request goes out from the client to AWS ELB, k8s node, envoyproxy container, limitador+redis, kuard app container) is 10ms
    • -
    -
  • -
  • -

    In addition, if we do a longer test with 5 minutes traffic for example, you can check with the grafana dashboard how these requests are processed by envoyproxy sidecar container of kuard pods and limitador pods:

    -
      -
    • Kuard Envoyproxy Sidecar Metrics:
        -
      • Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
      • -
      • Each envoyproxy sidecar of each kuard pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is not 100% perfect, caused by random iptables forwarding when using a k8s service - Kuard Envoyproxy Sidecar Metrics
      • -
      -
    • -
    • Limitador Metrics:
        -
      • Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
      • -
      • Each limitador pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is perfect thanks to using a headless service with GRPC connections - Limitador Metrics
      • -
      -
    • -
    -
  • -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/kuard-deployment.yaml b/dev/limitador/limitador-server/kubernetes/kuard-deployment.yaml deleted file mode 100644 index d9c15202..00000000 --- a/dev/limitador/limitador-server/kubernetes/kuard-deployment.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kuard - labels: - app: kuard -spec: - replicas: 2 - selector: - matchLabels: - app: kuard - template: - metadata: - labels: - app: kuard - spec: - containers: - - name: kuard - image: gcr.io/kuar-demo/kuard-amd64:blue - ports: - - containerPort: 8080 - name: http - protocol: TCP - - name: envoy - image: envoyproxy/envoy:v1.16.0 - command: ["envoy"] - args: ["-c", "/etc/envoy.yaml", "--service-node", "kuard", "--service-cluster", "kuard"] - ports: - - name: envoy-http - containerPort: 38080 - protocol: TCP - - name: envoy-metrics - containerPort: 9901 - protocol: TCP - volumeMounts: - - name: kuard-envoy-config - mountPath: /etc/envoy.yaml - subPath: envoy.yaml - volumes: - - name: kuard-envoy-config - configMap: - name: kuard-envoy-config \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/kuard-envoy-config-configmap.yaml b/dev/limitador/limitador-server/kubernetes/kuard-envoy-config-configmap.yaml deleted file mode 100644 index 120574aa..00000000 --- a/dev/limitador/limitador-server/kubernetes/kuard-envoy-config-configmap.yaml +++ /dev/null @@ -1,84 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: kuard-envoy-config - labels: - app: kuard -data: - envoy.yaml: > - static_resources: - clusters: - - name: kuard - connect_timeout: 2s - type: STRICT_DNS - dns_lookup_family: V4_ONLY - lb_policy: ROUND_ROBIN - load_assignment: - cluster_name: kuard - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: { address: 127.0.0.1, port_value: 8080 } - - name: kuard_ratelimit - type: STRICT_DNS - connect_timeout: 1s - 
dns_lookup_family: V4_ONLY - lb_policy: ROUND_ROBIN - http2_protocol_options: {} - load_assignment: - cluster_name: kuard_ratelimit - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: { address: limitador, port_value: 8081 } - listeners: - - name: http - address: - socket_address: - address: 0.0.0.0 - port_value: 38080 - filter_chains: - - use_proxy_proto: true - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - use_remote_address: true - access_log: - - name: envoy.access_loggers.file - typed_config: - "@type": type.googleapis.com/envoy.extensions.access_loggers.file.v3.FileAccessLog - path: "/dev/stdout" - stat_prefix: http - route_config: - name: local_route - virtual_hosts: - - name: kuard - domains: ["*"] - routes: - - { route: { cluster: kuard }, match: { prefix: "/" } } - rate_limits: - - actions: - - request_headers: - header_name: :authority - descriptor_key: per_hostname_per_second_burst - http_filters: - - name: envoy.filters.http.ratelimit - typed_config: - "@type": "type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit" - domain: kuard - failure_mode_deny: false - timeout: 1s - rate_limit_service: - transport_api_version: V3 - grpc_service: - { envoy_grpc: { cluster_name: kuard_ratelimit } } - - name: envoy.filters.http.router - admin: - access_log_path: /dev/stdout - address: - socket_address: - address: 0.0.0.0 - port_value: 9901 diff --git a/dev/limitador/limitador-server/kubernetes/kuard-envoyproxy-sidecar-metrics-dashboard-screenshot.png b/dev/limitador/limitador-server/kubernetes/kuard-envoyproxy-sidecar-metrics-dashboard-screenshot.png deleted file mode 100644 index 0c85cc99a35ad456fd8fa895e94ad27491e4b161..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 80203 
zcmdSBcUV(v^EQgF+ls)pfuewgA|fD2M?gVJ=snUm2-15G&8-3o0veihkzPVCl2BAc zIspQNmLN4iC{jWo5I9eC>weGq^PKa2*Y8`GT)J>&t@X^zGjq>9GkK}4smySa^&}k~ z9Roz=fi4~0F-|(V|KN@v1;25Y9GC`w{oyGO(LWB3z~k00!1+Zl#fM&cu8+NZEj?`L zY+YQPZTLMOdDz&vc-pyotsZWYrK7t-2YGN`-!F9u=Nov=d%ty!yyPN((|*(8kMoLB zsyf;cRxG~E zRybWLB}{*M#ygR;(?yo1#wLm0Z2cm|(ER$Ug7B$}VS7kikO#SK(5TleuK(*{@RmQ1 z{0r)z7=NA6(R~b!d=A$4&++8R)vG6dozPwROGV{Bzt4_eI0OHELieAr|J5H;D?5Cn zosglS(?B5&W+`j2-SyLkpJQb$B;$8u1W-%DL2I*#u!9Ejvom9f7QqF{qK^5F*h_I4 z85VZ+JX`P852n;K{NyCuEpABp%)dLv{=tGIh*V*&G#aZtC_(3GV3G%m8f>^VQ}gHN zZ;%!?wBI;$#^a;ic-72)ulih$DGz6)@Mb%Y_lQlR{W7vTS;Eq)teMxKzk_pjW{$!z z)k8M&utEHy|dPErv>)_HRNWUKSL;c%hQi zU4Ps(?*6l5MXeL;y~NU}-f@3VwOju_^~oJ?b1u$RWD&}xarQBrq=&vBQ-~sC`1N<4 zcaqdY{4Y*_jUhMU{I5!0^7iCD)vUdWZH>Q zez4or)Oq`q5n{-bov?7yFeu^cS?rE};xA>s3zT#{1BaTAJcyB{pS3?*>fNGH; z0?vsG9&?5*F644N7UjAcUMqa-irf<>o2XT-=~=aCt9}Djo!rU#5_!g@--H$O?i;$e{Z9nfNzckMHIyRIyN=F8^ zsh=iQ?Y%j#qmd@1QE7|biWWbm&;TR%FW2I8x1ak_cL*8DQr^jP=!}EJT2YI150hFK z%(_kIK*MfYL`3uug+vtfm{GxblX1xDt+Df9uT(o?7l2Rg&P|g(=*@=2R{3wt_4_dVG8MPK zB?TLsF0inaLM?)fu3o(g+eOq?KWdG}q_;{x!uRpl;@<=7UrcdrGO&ISXCAa{AnGzc zTx$C4D1$cMp4(lZ&cKH3wFR89yz3x6DEwKL|c$-aXx`F%?yCj^MB_YV3lq9Z=5+HTDX(%;M^ zSA75QF;(~+dRGzdrLntpWW$b_+$u=V>=$kpyff>7udfSEzoWcM=ID(Q$LI5;2fiOb z@;dK$&UFnYAWQ}ugQP`W@#;~mX1M}pQjV+mpkztM$4S#0WyT0&lndhX=g%TmUx=#e z^=;BC)Dve#g7^GJoNBHSk++tI8v=YAzJI2Vh>q@muNZmm^yy67j3A}u(b|oKiz0rn zDRO&dsbPAN%>25hf%U+4ERg#YA{VvV8F3@g02yED}q#_)7X0|N25KCC-_+3@r{8 zk(y)V7W>s$7t?(4hBzcC!c6P81}&uo@vj57M)C}DRL`@?1=qK68raLr%Zmgpdf8ap z*tM*mhi5Zf60@60aa}}4vsK%4GP{ldQwARLHTh1FyL&luXD%h~&6~_dDuq{+lsz>) zCE`4)ulPVI0lBn*$=e|vthH*&Y--=1x*;wO4dPfViW|^3Ko}d`c^rsf?oNTg4T~RU zf?HPGk10{eO*UdFgGg^aHW`>c*nv<#zYmm6e{CZpqeX0OhOVyOd9atXWRr?_SDLi$ z*me&9;cm{vYJE7P^Qg7ZW3~f1wcZH~ony2qq)A>ufeXT;l|FDiMmBdm^>Pqp;haW> zn1Pv;@6huTXXBIJ=vd?8Y<85RFAt)oTOFsqykx!Y_HKP+-R45rK6M3EF}9rZ6&fe5 zRA2Ds`h-5pXL+E+Oq#mCyGmIgWRRN4ayR7Un%e9;`lD&~zdae;(cZ3euzxr%HMQ8d z+F@Y>EvE+@a<$dY$(ELu#dOrFp@&BW`yvK9GBQ$VP?V6Xky042Qm5RRz%M=RjF12e 
z#lIyEA@U0J=XalD@%yowkj=VeP?aTqT9n|ufLOY`rnB#0*<&9}V_ zh=!wO?pei#2jYXkSok2LO$Q(CyDQBbU=_xQLgVVf{r#9G;`BjqpfSb-VTdq7DFsnB z`_3K&${Q3Jz8P^!;B4XgvKz0VA+wP9qE2GXDl{z2Ve)gRs)h_NL=~c8U<^}AIs*rm zZs_AvQ+qI9o)tW;2ak=P3Z`G)UYiCIGcc{^%S}x+MHn}zvPpaCFOOCmR6CqkR8oS; zq5N%Pxlhv5W5(+|8#dZHI{NGVHwI9?uE3W(Y}%{1#|Pyuc+|@1(EILh0o8hy$CXA+ zx3&ibqrv@V=VnykAcj>5^FnHIJ}Cl)78Xs^Et5CidI_@}l2fYb`+FOgae*5e@N6!% zL_1QlD1P1MB^tXn-Ael2OdTx1kdQmMDT9UHd{oc2KXD-wL9AN?7|O=I$>pM3%YHkv ziF{I0#zoj-9Tk-q4g>jD!<#dk!;o$faUk(}LA z8#8tn78Y(0;sEj42W4h&Z$DZckjTj^DLGP&`_cjwtg35lnlpIc&@dsIU8b<}l4w7$ zVBqGM9r5)$lpTU(j=-f~sxJl&SK{EW~B~=;#|l& z(A?TTYHj2+f;5=^8jT59r7AZBkox<%)U=3|2<}+Ob>-**n?$ohE$)!_qK?>`LTlH+ zu2;{Yp0Rc^E#~CidEk+hNTBTx8&#?EZXLHzzBk?^T6r(^>Nt6qYOr3g=d2}S_E{1%nb?{^yUd=+0$K5$xXVL1ke7k{lTg;&U**GNhMDN zoP|#W?KW8K+tJ=zm=BwjSN@mTuPB1MaL)FG*e&8Z zSAp*;J{SjqK=AvRm!$@s3CPv`-D;n1;~)|V3O994YFzu8ve=vXti);jp{1o|M|U?V zrR3J~+q7kvf)@>zOeBV$^#_hg73~$&SmU8?)8r0T49`Y-FQ%{6_|(MN$m~b-FMf2W zb^u|RLO82+Lsr%tTJ3^hmkB6zZEBQ)E!}Wc{RNn{KfUeQI;E$lC+a=Q($UqWk3)J7 zprA}m)-$P+8=3*>J3EGu{!M!fZ9$@Tz4s2bTje_ChM>W>wrK66kBDxinn^iM_eDPvd`LD9^>w;*WOhhh?Q?A)bG(`@FIN(`Z)p}c~ET82f{y^@_-9>TafuHCx7%V?s_rIsa`woAYZqH_atgPF*f znRX67US5cMxx2$iVU8if7y+=IVL<_psN>KrobN;s0JJfiV{;P&?`_mTI9!Zeu-MOW zUUmXLNu~eOvyxhEj=k1r$f=006f$Zx!G1qy@DtLTXj5%GnAJKF_a{eb+`u{SW!SUJ zjN#z+=gUYm!r{j;i!lmO=x-)#v{$})wK-fF5F?wio^*>jGCg3$$mkYx6q~eR5!|pq zGfm2&ufC236#$kRkk)Wc~KUmFtA28PaQQ7n`q>g)t`NsF`IrYpAJVC=8eOzH02xoQ(=0 zo6QmkMV^EnVUsS3TR&&iC2@S|cy83CSvQHzu>}5L?`ij?VWI9F|7c^2L1UqEq2xQZ z(O;uDEb_+-R8969$T%9{w_h2rqeVhS4%zoc6M?A%sCMgNqTWhU$ zS`hZGFUeY|Be3V><}QX3EC#+trSxnS{_=BAJbLmkpCyW%QV2WMe%t!OI?;y=k!|XG z*uN^8F4YK<+>XvpdX?`tP%a?NU@;H8KRpoyz0;UJ$;J-SG7uyezE56 zyS=}EyBc+IBO2t?PubuZ>+BrAFWgUl5>j#g_vyVBo0C4j&*c-7oRu1)hF# z5_8KDAfc&EwG@Nq(o>x8*eol+F* zLM1xSZlW;=BuXJk)0=4_V?NH_%hg!B*^YRdbavXEJ1zm)gV^x)$i|j{I6G%K^AIOF71VeWqR2IEfQN zLj)A2`MeJw{P>MOsIW=-^v1|_4FPv20^3w!;MlJ)71y{r*yRAJ|>I z9dMkEfWm@jCm@XNdq3_+L`7mgTd**X4VfW*1|w!W0m!LuP8M-4{PgTt0kA`3q;-LD 
zl~Vx-tcyhlL9^n{?&bbfj$?*Yifne-_uJhWpjOGFmK z=Zx|+*<>U)A^aAN@>+7Fs`ZTx(ZFpr;Usm~QhBTQw~xWptua^D_3hVZI+=3-Qvh2Y z#BO$rS9+}mkqbQ&d$_(%^_5L>C5tW7JLBtHT!Xpzo3eM5l$9O7yujMjj)}rHO(;|{ z8*F8VppVjYR#a46h!UUB&gs>T3iOOKEH%%$s=y8nXJo7IbRMgT$_LP-9PNSu_MzqKW_)D3+^PA@-^yGY5iVc z6N!97&FSUKmz3@4_GGz`zgOzW?A|N=Mkh|3fZ1<1RryZH%7`x6bMf#HcUaQ(9zLAB z0C-fdZyyhd+V^L_Mf{L(hdBO+r+#8JK0?*Rfggx&aBw5HRyx=hhJ^~@j5Mr`XQd{H zx#EkXE{PR_K<?w0aiS`rK+h3QVOu$uU)?`GD$r_%Mua>exr&9kt~8bGztjtf{2HWrf4~GJy=O;SC@Ks+8t#YsG`Bf^mKDO$pYSj zLZhO`<;%x^*!abtarDO5&!Lf<&E$z9HLSqm5F#Z((Bvwl&=?WHY5(O?X+USPI=4Zo zp~k|(MR+#AHmO4us>uX0stG$RH0?!}@g)Liw)pXHDrFOc-Xvs%Fejr|aHvkxE&cZP z_K3(xJ|Si;`Tj{SX2}?Mf`wIs%?G#}Pc%m&=j`0v;xi`MI8H!l%EM_pbk!@IiKD-T zg$K5qw&J(sgW_yIV`XUSlse(nV`as7;)J^faiebN!}ddR)v?Ibuh(T|GdG9z%fSTratc)9} z-r{P?f%dPfCA)Sel)%(f^4}L1Q-Q`8hXMI?5QK_mmj(%vc^0x(({ZRcXXY&LO^B*$ z1t+hVSdF&HHpo@uI29F@{loq73B~3ONX>M0KA+KwFOS>4`#=kH4MNx@-ZAqhJAzz- zRothZ1|EHu#}-0a(gl4a`}+D+A-w)Uv>5)beoFiYpn7)>WQO)zAV0!+vl-O7m?6wi zhCG_7y@#3Zz8kG3=1+1lh zo}V$lF0^H}^_zc+JTX(m>;K)po=PVTu% z#Q780;GD7TH>yrSfHIL=D7`OkN8)iS});uS(7B z-z~^KL#P7)pO$nk219+LLVQyNkn`6X09Aq)XSeNn3%q;eQ90oc0~L8v9y9qz8CbK4 z(q?(3`{a1$TiexN9;)j7Ff=8lFmno44KS71$OG8@Zj-9sd7Z540I~~6H0MFmp#38O z+q!5L62zmCECSh>>j8>*^~$SZf0S5g(Nuzm?eY&6>*xW|DJGbqe9}PdUR*|Zbq(;9 z*5{U$xiJJdyuEhqI>-{~HJ}N@Lx{0=!BqBs4RLQ!LqZQcwP3c}shCZc+2sL_dYN^v z?aDH~+61m&ZCowinQzgQJ~cgE4HT_Q{E-iIT+i}QFUH(Hh2^mZsms+Sm(^AqQyC8_ zr-+D%Hqu&;cS>Ygb+`SN(*KggH=k|FHWbPy)FGfP#Hl z^9iI|e95v< zV5yaXnWbs?aEGNskHnMXFKVXhj1eEThuk|_UfMfFCcnq6*c@^;T zs;W}8uDoLQqxztJmWJO&?-IP~z-{Q<#NPbOOz!pXg3#5MrF@Pf;|dB_QreiDshL)S z;VW%?52WDug?POxfV|?#s)xzd5Sb#0R{;Xe6KP7jiA2h_H6QICKaM9)CK>)0Btnl8 z&SwAW1^6GCOa0#yzSNE9)(B%!(q9?JNJC>;4b8Ny!XA?c+a(Sb@N62NP~4?XTE;?wUsF|4=}q7_5d}dXp8YB2 zlp(0NSXUd1&SH3tYn=S)9RE%8@-Ot*%U5uQS@z#S%ObjpZ&ANDGqohp)6)-Le-?gI zTG|8vw8in9O7?(FAxQt*@<&x3Ht_dZ@4d}hqgn6H>x?*w(SSI9fj{9-u+@(P#trNo zU1)8c@x_Z5krF~WOr(V(V35R@JmLSHRQEjfh<*JkD(X$Q112YNz#x`egI0mP*2%WF 
z@w%e2k_$pZl>pBGx($H(wihacg+_Xy+HIiQe-k(z6*wSLX43Y*tWASb4m|rjA2h$u zCQ8I}PF?ZA1H0aD_l+AIs?GiOTdby?hRY_6j3zc901W}C{OPaozdc|1l)kki)2Cn- zUR$2qH%py(NPY9|lG!Q5!9_W+_6R|qu^&S|snc|4n;b#oC0^{NVJd4Phd|NP6eD(Jt? z{`n>CFU2vaI*TeQF-1pKw)b*cmGqr>Q?AT5s!eR#{bP8gDultfD@jO+bg za;tC0=)(1E%FoT1E&HI%+4te#xw!^KLv{xJH)-zzY#(+&C7)wv&yC^MfMXW!vJ}x; zlYSniy-(q<@A@p|Q)CCQV%f=;^H-5CXYc_G~Fj+UB6!v+ie|5rcN)( z+vI@^1)%1e5ZG3o?a2i1y$>Kku?V(77#4vPB;+yQzgzR&!`Zo5#II)oqUi4IdMv_r ztW8|mAEg!4{9nw_x9?&9GnSi?oF; zf@ro8|1<%=iV52EOYT|B9rPvz!(WMu`sw>C%sc|pd|ChYF+&bq(u)*!uawmecv zFb(^e_R~J`?N()KXJ}4Njy=dnKoWsO;(M!mw^ieBF3cu|I}UXj8CO2m$Xw$$7J(id zIy~bxEaKY}7__@wi(JTcK~@I%CaH(b^`sSDv_QvklJ=X-7yEN6!5-p*R5tD?86_nx zj+K_eV`JlAzkUbOGY7C;IU&zDE*1|VOxi&at0PaZ%4uYsZs_0Wt1Xm@d2S6k)tqgc z9%P(kWJH+^=ZN5EVP-B=KFVYOVDFi;X9w%lY-P6QAHlPQ9<>6#j8jorkqFu&ME!cR zDn^_@MpfH@F}ZaWMy)>WA#s^i)FHdX#j_d|eZAbR-qWi8AjQF+)ncUaU$ZZA;6`uC zCr`#O=;@ojmH*6tGV)}+rEvT08H%lkw;z&0tJYG5J64_B_hMZ~iaPg2?mSL~u=ecn zuDhYo5`DW7P21Cl&WozUH{wp$UGzU0SpxohCK^*2nCF-u@PS7_K{yqT(d)evBT>|5Zx|=z%0apKdCF9(uG$@Yp-H4qV;e|_3F~d1kB{3uheCa z;WY3Ej^Pb&=FMxC8NZrg*?3?&oyr)V%Gmz6y4_m1bJFd@l$+M|aIIQt&2Y0LxRo90 zpjK(qwNYoyB=r*5?IZ@*loa)JFze&>A&#Rr+#^%&`p1BV*S0b)exb)_v!rlV!(VPmYG+mTfMqk3^r!U}%#W z=edw~itID_J!T=!k3Qyx)mxn}AE$B;=r1$o?Mhtn-x5LxbNkou){S-WHs7A{Le?Is z+cH4ceDl(Bt(CH76xwN7x`dp>K(%=k8Dhy2e|1)NTz1$Rk(1dCtKUUpChhsOE^0L? 
zXol;QakPsD`Pn@$tSaP0b@QUSuZH8OXotk+71qbKk~3wJ^;AYw(S(a9GJFizY(^DJ zQ3wlQt8YDG`$IG-8Gl>lVgK7lPpFH`_G>`NiS-Jnlckpz zn_^Hb3%>QDGoC#@h%8^$CU;#!)spvTKF`!DviNEWP6fKOEnv69EN@IdNO*tdnr6Z0 zr;mz@A|hp1Q4!3t=S@`z}^oak;rsD4PSVJL_;d7(2heA+l%X#^9cA+!m7FI=*o?GPRD| z%tP4#T1yCA@>EX@j3&xr={1!<34Ouf!%%WW>ypn|d^>4a`V#SRx3j@@+V zdIh5JoTX0Y##{kG9lg^}FkL`t_(`2o=s>qa?H<^D^6w@j51DY_AI=~Sr?F<)QmxyP z9rf(qjs&46Wy$&9MlBk5QG4VdY|!5j?pO>Veb5b9VBJt_z|vZAv|~^MxtJTGiS{Gk z4eDV_r{a@rhiohd1BY_v9Z8rv-!9EgsNbx%EAD((XKm$+NVAO3N6367(Wza5p@t`Q zg3~p?1~2ya8QE~In3J^btOOpXGr=~Ie}da2y_po!h}%Uc+wKyvuj!t=;}bv8K0JJ3 zq*%-h1~CIH=z|n-SAmN0PjXDW0II`j1QV^9V|s#(4-)a}g+2ll@AEXxxB#WEsTs`) z3ULbf1+B|+0!4#2A;#q%p{!;@DtByq^dCHUew2Ydd$7om2$K0*Vm+XzBt4!alQDoW zksiK!<+vR=g7?C*OT~qgQ5E^RRf_TNZ-IZ^sPcGGB;EhM*A83Ik!+)9o@+LAE5!qJ zYQ8-s$ySmgjEyQIOh)YDJ3J4shy=2K^LKg0&_0R8_(-L}n+3vg*~R(+lXQ_E2)tWj z^Kqv&!xifm7+1T^K6NJ_benu)$a3#su=vDKH3_?UC0wCH%m&4g&Aj%#*{yqc_jU^* z47cFr=lVis`V?eRXtE-g&~f`N(|EV=rLeA*@IcTTe)WiW1H8UnynTWE7kcB*ZcER$ za}MjWZ#^e)%y;i@atClu9E(Z!Uhirpl?{vH6C9x`A9#eO1N4@IwTT)Z&fSe8$zE|k zr~d&{@bpXm;MHa;4>`XL-qm20Y5#Y+cB!jAk3BmUZqMMpI)xRN#Pbe+t$%qp!};TG z*~jkzAEq^~>~bHc*L|w!EU&n^?JS>n%8LpGy~`A!4cXO3-^cV?3} zk0*puHq9J)%|{q zYvtIJMy6is{_W3Wu5IzJygMdub7mY{AG+-ObSf^qJa1(>J;+c=$7XNW*$~y0@bp0y zA(eBLd#aL2lAaw86IW~s4=XuXvebn355~UpE&MKE{zailk1&lYZtJ}vxz2Wk$w$U{ z+RIr1KV43KXj*2rBSW{9gij~D&Vjc}*Q*D{{%2nX&S_Et+*vcqJpEA_+0tI8)i$Fd z&-|D~TG&lV`RuNz#6#`g7r!O*Uy1-F}Iv{Q!Ey4+V-o-dhYkKpCYB1>*eA%g1u z4$qv-ZkZ^$r1yAafWKzO21-j8o#N%`s>O}=@lpNU*U&wiT6OcTSoVuYj-UMTCQbNb z^LbTKgbT~;35k{lcGfRIe7nPh-Mia*O^X^Z)@LiX74*Ti?X@UYz%##K#_01+noNIbweJ$ zES$0@d)V-fqN^NBA4-^-&vDr#I4cXTQ=0YaWIta%U6HYwlqWq&;5YcP*0=g_I-uB_ zoN_s{LuUTF93!*wChs9;Uv(eBUQ2PJwL@^`EM<$rPM{aWzFxhD-cR?vVPdN%ANUlm z2ghMgq(7>w$DV|*uIa~-*DoBhfu)-?Rs0g)kzUMh%e>mPnTRl(GICKv4K(O%s z`3=sT@w&E?9g@QAjtG__M+1D~=4!gFZ}vcI((-Eo+m`l-!l}@QpU*Knw4Jkrw_CNH zvsyQ|O;%7B4t;A~^Wor0mkfSsf5Ori7chb-k&Elps9hXRIF+;1aqHS!`b+4goBwZf 
zf92Cm@miK*D7<+a;5%PmJ{j7E}bOh_G_rROBOM~e40Ac|(I5z&Bg=!_Jj0gXE_b4aRZ&pwS*r~38Q54i^&Ez*A&!3LIU#0~aU1_d>#E6Mx}YHWJq8nZ-=?ZD4efVXvnzLYk4mW*?P7H6#UKA~~A1oB5l^H;Xp~Fyd0Y2nFCNwl0bm_#ZDE;T7g?^>Z`RazI z@?iF+ieBhH%^3TIhf`t}zYRTd*sbL*oOmCWpv6zI{YXdmi6!p&O!*__L2_mze*Y|w zX1;BVIX)P$zL^T)=aXHi6b2ggGf_4o*E6)lVasw;nzT`nOxe+g^jD%~^va4s$M00+ z!8)HKpaEURx9^x-q`P-D(YrVg2{pu`rwxxvpZk&Jx*hTyPBWJEj$ITj$~>mzJ^22h z1p9`q{SzZSom+&*4&Xk%?Rk?Lpl#M-RZANhpBR4wt zXsrqy=dWFrT<_Mwujoyb{Xv&1V|LUm=ejh!I)DU7xSurS%?l>!3Tk;y`c}kbr0WA) zeu`g1^s*|0&qF#ogV$j6>Pp$E%FGo(^E~efXM+6CW}iO>(?!F*-H7hhRFktgmsK!} z7ssBmouSkB`%!t9XlfaFp&!#HZnrk$&3XMtzsbG2*BQ>3sTeFDkBk<%-lL&#Acppm z+L>##RlOQ?HrD8NZBVZ?H#)bZ{X-gbCr9GFdUtDP`sCo%D`OMSXire72WhRu^(d5S z@loeRyZwNor))oYs(a-gch;-j`i)06mKXPRm0N})7-e)XJ`Dov++THi` zrK7d*DcoA%F@qB3i%YNR-pOA9uXQ8#wOi#%P(x%**90QyQuH6vmwzne$(@>@HCe8p zofIjVaa|3UdeFB*_wh&!knTJE6QrBOmB}=bg9c764i$QzpKUjEAx4xnr*10wW~kEh zLgKffPmIUt1ng*?P+Y1`W@^qWDBX#M@(2D#Lw|+;WYXLWoz2RWQ|49-SIfnDZ%K*# zc*slILmqa!*ykfP3Ky^t$6qVC_tE-tXaB3jkU?j(Y0-z#_-Aa4bf#{!)|QBYaxMC# zE^b76Isb6xPhI?xu19g-6s}(N&T+6X5*_XX3kap%`}J6Qqf&Sc8+J=^_m?U@N>=|h zWh{!UX=JS_JqyJB?dg3`Vf%KT)!s0s)}?pUwH7RCnzkm|apgvW)>yH_OLqq!wPu=O zviw0KxFEU>bD{(997TiVJPI_~drtk5&Hd{AT{ht)4F_x`q)g$^lMQxI0)Jxc?V25F zg6&_ii0*Xp{w;eC%iZ@usxog&1P+@V1N%&(6qInDoGcl8UDJUs=UoUf`SsTK>MC8S zraG$R5+w||DpSppAANIba5uizBvx0E(G!*NbkB^+EedJTs zDo%aPo6qsnF~`{%!nGfphOVP4`4OfV%Dnhhxzw;a@)1o(sp3FK_l}%C<1)l1=%v4o zKj*yXZd=y;^Ai%T)VnS@#^Ze|B;$eOJ~!yx%xR{1{$96|ffxC^rm>K7?3Sw(^OD<7 zz4in|%9)pbNf7nkA(3dd5f=cA?Aw2^*(M62E2;Ub_=oSiW>S9}*{6r3K|@98OZ}pi zD4O8n9DiIsWb>C}sJ^h{2qEQ{I6oe--xxQmm>V9L? 
zG(OU5z)@Wsaj@pQY#wy`mqF*^4U`CvO_(I1~+T)!@w?i1cl7@QwM{bL_=vVW68Mz~rD^M$UOKD3SO`X1@<$%4|KDB?NP z)q8OE%u&19h6XMO&l{@$Up>J>v1ffoaiDF(e2TBF%A$!BBXUY-0Fy?dzDH)sN;G{fH* z43I#<1O+toIGv2V1OA>ylXEJ5d*@`W!q2UHkEh^fF;J8gviv5L5wqgrYmjtvO%-;_k*d&f!J#;ToLhwZUB{{MrmAV?jM3v)p zf7j@{N{*AbecuH1GM1+A$DcWKG@tABtGMjkuiRXxR3w~@goJ{;(0d2^o}QIp#6pA+ zAxRk$Zvu0rQ|&yUjsIv1IR;ytbVu}H#kGU5u~6R1LEjA~PZ7sBwmC*>5aArbDzSoT z#bIGb8M-?$SeGURd;4db91~PAu<=V!e?2Y{R?4+&S*`i-Pkg6C_9lpv`CP5S=5W#) zk?baXtsdiyC+MUM5_ve!o;d@?Fdxcn5N+jc&;owu;U`ZH3=QRS?9U%d7I#fgPEOWL z!S*erxh(Wm^cutgsHhLdtb%toW8vAs?$o%XHai!o!qN*Hp#V5H`HuU=3bQX&>_DmsPBwO_|*%Lhut07 zvXrPzH(%Yh&L_7$Xz;Z#t7FQ-2V}wutS~32omrmM1d^_3Ty478iAO$fB9SrbF4%zk z_wVQN7j>S6Z^%k(t=BkCa3l-K` zee)8w8hSDZm%?N>gyo)}iB?+nKlpARd;2jc8ke3ISMk>7tX%n;=_m{1^ZE9t1LCn`%f>i@U9my#vq}>UsX3Sm6i@WR!p`nT}FzW0DrqihYlkzhAU$Q(d+b_UvZt}~(dS*E! z+_Xc2cLYJ7Rroo!^Eo7=GN3-+TQ z+$+UghQSvV*nvR{L$vJfD=Vu zMD{vWwcxsR>wn6~ni!A0Y-X7XB^GtG&ejZPZS$cgTG?u)ua^YaDJ{*J*gSK~6Lc<+ zi+8o}>^s++H-&`$iec|kOvvj38jUW+kN3v>5R~iF<8(0Q63`&TGV@9U!zCUy1n0rl z{qiiZ7s(9_Hz%Nox(VdKGQh--Xn2J%a^#MO$Wp%(VtbkSmq70^=^^u!0uoskC>-5h zz_V_*{eT4`!FUy}$J}1ed>~2eKSQFqsvAs4rE4_`LiNob09N_xDH>png{DK;G`A zt@73TaRBgOMdND=$$r!$u-%v5>ot_$QMO@N&SLvsB2}%?YHD}Bmy$($e>S&JPCBo@ zPhevPdewxhX9`IFWRU>P7VmCo<}hjL?vH1>fK-8vhR8I1X7?Z!LD6D+6E44H_~KpSPRyOU7aWpMO#`x(E3(t1JZtjRtpIIaa~d zS7?coR|M^?ce~q-*Jq^u8;e{soy^sZUq z+xRC3=)EH{RAaaH$dRVt0t?+&w5#sXu6oqLmQhl7(DVCB&uz_7*ffDpglwW0mzqLT z*EQOh&GrR(bYvi$p!hTAe@8F^>(OZRe^nu%>J4n;^mDM-zz25)|LL@7__RLZ(su;o zAsXmV1I+!!pR~s*e6I(j0`1roBbwuLs_Gv^CB+iJ@~_c>he(8vyFdHAG*}M8l}$Ti z_(u13?HW+jyF{7*Y=tH(uZwyKqa%M-#qX_FhZvN1kg@Zga!EOj)b{`e-V>yCWAtasOqx@6_T~N`^Y*TrecL)J))OfStT!q4fY;UY10zuc-Zg@-@|sazg;-L|H{0 z%f6aPj|1D>6b1{pas{ARmw@#AEv)OAOXBZFFmlpKiB#NRr;z*Uk$!j#v*@h z&0c zv$MKOS3Q3VZ%n$XTz!m%qu04<@0IRRo)2t{PuRg1>pXdJXSt@_!EwtTD;-MPPCwOq zft14lBekwN_II-^K255l$@&=ZR#)DeN`}A->0=81GJE;^MP&NdS!A`XzS3!+oQuJ4 z=dS{1_2NPXb>EB4)Z{%pE&ks-+l@`g<}ZGJgq&!w3$q;;;-`Q_kU~Hru^#m^Z2o~kCD;=` 
zj|Npev!ea|NCTvsX8L+5`yA~f1p*DxloHg+z<3?Bowxyk?|pMAS%SDN?0sV8!C zp8Xg4TiQUtx7G;S?c>O80Fq+C*M@*;+@kV=g;%p&TwG=suRSrpNqu_G%wzfnKg{}f zMnreoa#b!ePFC1ReBUd9S*BhIZ}t~6YzieB?RZJjDjPI57R^0AK%51$ z>!9^bdEv`i*qwxM9!?(h3#HPlNVS?}OhwM>Vz>CjfGE}!0Y*f0a_#L#Dr_iYMWqmU zqsLgHcguNCeJJr@4?E$fpb-^?oF@9_*VGt-_QlbLfEebxsC#gcgY+FR^8XT`Q`nl8 zphI_ZwOO45omXbPny~0n|E-4FzhZye!0DW+dkBZf7~+OpgQ=@rohDCVGQiFE#Cvm2 zv}b?$n}Tnc(3|@Bhw}(hqX|ZVs;XRgmoT?Q_#i4O#%p=+>8Xphx8w}e)j4o?4&(;M z$BXOh?{?Q&MK_YVT_Nyvzx5Ne7PB?#G-%XR?{3;nS>FkSEv>c6ZzwA&5*38p@xlU6uI<<pQ~fji~%!uD?Z2P02;3p@sTY7R$jCxnV01oC*tBtBhp$80^ap4 zyTPi`CCnqW(91Bv8#m^wY`l%+`){9G##JgK5VU^IPAK^H5etCLv;mOMT%fd)p+^2m ztePU}<$KMyz!;R^-drf74VcuZa3U%YNPc&PPbox2MNzhDo3h-ezuM0gCm~(%-pB)6 za)x_)kLivKBs+Lj1sl5i{z1QIze3Q777_HF`N&)HCc%EckLJaa>?5g}hqY5J0x;h~tfT z8D-$*RV939>`vRWevHB_CJ)_<4+;KCBwWpJj9nYZL^Vp_!iFZSL$s;TYmAN1O|sK`}pfPe}}7eRVg6bQY9-c))Q zBp@Y3L_w6Ip^5ZP=pwzU2na|o0YXQlh8_Y5Nq#%(_1^Dq-Zk^iTJz5QGiSNftel*) z_p|qY`ty9ksP2(#i=})V3IBc7ofl#!AfIrE=HybhAmvV^%;qbQ@JlszMa9G%LHiez zYL6e)0ZWd{XU`5MdzbU8$rBf~M7U)&n8jU(TIiV1val3_C0H73KQ!Uo94VlGOEn^S11*m5-4JoB|4cSO<&e5n{&w6IE+jk4nhetec%dN|sJ*|+DR{Y)#t<3yQ*Ail`)YdjlprkC(74eK7764#$nM^^z1PR z@W?cNz5t*8C<(OLTeXvXRLS)7O8FF;XDdJ)JPO|n%|9!hS`1{(!hN76c=`pk<~V^? 
zLA|xzuFyTQ2@vl;f6_xc`WEhs&FO$)5gPisUUk%;+@k)(iNWn+R$XX3RiP{w^QC&d zeq0J%ru)Mt<~v#bHW3i2(qj%(vd6B=OKw^^rZ2UaEk>|F&-!`Q^bordsY36A?RRaA zsj3xK1L?tX*wXdHrBs>JP6|Lj)|~!5m*PHAGyll4%?oMZ9)P0DM0W6-?5@xdB`wBC4u8M(L?T36rqdslce*!bm_6O(R@@6N`9hfnE&k$e!{dl)Mv;xDXS|c^z z<*(^=4T}JRtg{S86tw$h(zZkEY2@Fv=2DTxokOLTF3_E4K_ZLULeytp z1e)4{@WVz?WBnsAsj3R92K@8d)=jX)m6lcBxuSmBGk`DNd;T=FFSzy4m@+s|t)Yqy2&O#Y;0l^Ik{MB_*QvtDCmF zZX|#$_-nQt5akMe`zF3x7_hN;T#VAeYfpuPmDQR{uU3qaOlJ?JXZ`Id7hC$#pX;lY3;9@>@CjU|hKWw`&Ke-pBrW4T!UQ`x=GN z?3m}&n~bObZ37(e&o%EjiE?%;m&5lPFEO0QdPykU2CDazRqc*^PTnKaXUD`rBfq~> z^Zl)X$D@2H664tvY0GT|eL)>b5Ex11Ty{$L(9j*Cen9Hb?^T_$a>{cXq`H`e;^=!9 zI%oi&|LYWWG%ELLwg;)!!2}}UeMF2X*xSEoeJb%k_xvg;?aKg7uz!pdx{IV#?2OMn31${6}iBA2=ME$j|hM(@l7NoznFg4#(`^}}K$dOnJSG>u0L^ku2R zurTxz=o}JvPi3uPE7`(lE~e-Ny~3M&dZnVaUda=__T~J=g}Z5?t|7m~-GL%w@+X>_ z?bzdkZg<0W3MGIcq!2j-q5^7yN$J!&kmS8dC=?o!-?FT6f^{T3>HN3YE{1FG=xB}b{6L8y%-Ya|aKk4M)&=9Yrq;akHTJICP^D4;ilQ%5m%!G9; ziVX$Hi+4cnRzgs)<;F%Kw2|a|X_iN1g97sD_3QGvUZ5Kzsx~?D)Xr0ON z!(92WNFNp+x|rejost85vN3USIg*B7dn+6E1R#A3T5@T?)ymy~sI%=%MD2Ma^nj~8 zQkHB)JRk95D2RSP(it-Z8c1fg#c3KSDh36ApIdaQoJ=b#yGe*#3m_KIwoNyMc()T4 zAwTT^^ScVRVM^n#vJO$WqkQ<4fF%HXB+t%1p7mww>QcQo)#2|$n9cZa*#hYk<5v2r zUSTNa21K=2qwjVdLT+~Zb=wa+V0S%KF8vxjIFp0!!gU4UfTiCah|+2+Z5sR+Apr1I?aO0X-<}-SAZ)&& zXEDVd3+q?Tp3q%0fBR~sKPNI^Gm34w`#7ZlPTa>Eh@AkA`V}1~r+f|v(2A z56Cwl>?g<@B>i)-Dl!S6@_*skWL-}hKOdiItP6ahjx|y__#V}rK1ts37I7Vu=u8yV zLwD9{;wdZS$SwUqRx1#L$lZICZ$Q46|IeX{)HU@mw7BbBznHm05K?q0vLdlc_B&O4vyRXvA35_vA>Ufw@b4div;AQLN3PojfV@XjYEH zY)j{8)xC`Lj0|d1FInQs4PeFfx>-KD8S#*Sy&ss&D=n{~=UT13CR-VQ4GV{s^z*xSC#ZoEkGZ`5_BejyJYWDN!{Vu} za6xsnR5Z1D@XPZ8axSCK1O)~_*g_TAKne0woC6WPnzaTt?>9rspzB#qfX|NLq3yd2 za?-koZp@S~^p^i4d+OWe5nF-T{u@_7pUZ_0dG~?rsVV$n_~1K9Ls_K)%FC3}j^FY- z)zE|zXRg}*0^N+ZobuvUs2sF0RPo}XC(TUvEbA4Q9QMr!WF+C@*E9#;##Q9kEWG(G zwG@gg;O>D`8*dxIk~1ji>MeaS6%+dWDaT)sV9VI7-uIi^c*_DL_Wcj5WZR=x2iY2} zuDJOoSSA|;?5vrP4oAt1!Rk$~9S9wCO|QAON97w^ML3SMYqXWJK!Tl3H%%hIUn858 
z@{7h5)+5Mnq%(%ruiMgfOXU%j56_PTigLDBb8Bohh-fzlV>jkHe!Q@I=NVyZ!;tJw~jnx z6Fb_Gv`%{>Nz8qHJjy~Dp8ljmDn*Ve|Zjds{nfJ?XJ^a)BHQe#j3D>Q~-Gq{}FAJKTL6oUYzxB^8Jdnj=4b zav6nc9{TX;#;aFe#^^suOWLYDff2ivGMm!%3SBxU^l7w*$EDDU`IwWX z6g+tfg9_5*y>YdDlJyhj_LQ5S^aeU*fJ|8zvu4uFl?|?hP`0sHVTs1DoeQ5Z_QX0Y zcDo>P2g_aM@~k+jze1ngUrxSb6VZT`V9EAA*l}Xeyo4XRdbmA^R!}+Ogk(h{-J9&* z=^IT3;bA95Z4;+SCFnrPa74yJ?gy_^6?8|?=GRYg^OVss2P1qVc^E(5PCMoA!X2Ox zRd>FbdzAgo5yBLA7FkSpOdZt zi5+#bWtC6YUE~LpSQd@mXZ-OlM-$!fb$tgbp_E&nSfk}WFwu}f5Nl&)X7Ar_WN_9> zt2jMm$yZR%7)Zu|)}stS%_(wv5H0gT#>sS5+bh_Gpl&nR%93^B`Y6%r$f&xJJj=>* z5bG3CImX;4UdX{q zDn0P|xEFS^13E1HgrxdlgbQ;Khqdw?cB4V(xVN_xa^Wuiw_ci8XaN)NCq}?k8|4m> zgBYE=wRDrQ>UuAPF?f9T^kkPt6NR!4uX9k*Z*{iGdJ`DH`QY@EeS{(ruP|xF6fDD< zRNCz(f9mq^_nlZHrleWgf|no2!X@F5K&(ie+lKgT3EU%f^;5-Mx~i#S-&bw<*%Z#I zdLnZmT$QC#i`iKo+X3T=0`ZcDRDYSd$20_=p$4=vd`v z49nSukaX7lU=nhFZfAUyAk8TmAuto!NIgwnQ7Kj1^>2}S5hMGpCRnalhqbQ0e!>;| z6#crf;N75(|H>j}CC5G8nCY*a5~~G0{Ng?0!j$?wlG!6R#(m7d68E+5Cz8~Lg%$5i zx6aQ?R5(gH@MTB#PG9&w;39KIuk~7^A{$#neWeczE>sTvv8z@5`eLTiRi%(6%=OX8 zxuC}L`Eu^WV&oI;D`GB4WaIIQn)aBJJCe4!8s%(i!ETef>yP><0a#j7 z9@-X%Wy(N6#jLMPePg*Iyb^(k9++Gxk)^?Z{7AC)tM1>}O5B<4niBi2BGqf1Z>gS2 zPwN~-wxWTJbsfT_*w7%XD%j*DbcffDD7#otd}&t&oiPi$@K1!yDPLNb$w2b;{o0X; z=;Rgs0mzLN@3dQhg^?ZTj*LD&)ez}m+4!-BuE7pNo~_%#Yn~rfe%6S6+Bmskgtps@ z6wtEliI0W1M9Rz=nUp#!a=mrYACS(ITTS%2`hkK{)Fd5$AA`qSt*rRuUkDEP0Bgk}|XVOgf+#op)YdhEg)J_0;^BN>3zT6Dbb zM%KxpI_JMEZ&9v0Y(eBY3=%l^{K;(RxauSPV;LT4Ioi7qe8+W9`u7>Qmyb;IUTpf4 zB6P9tG}n5QwDWpC{6N>Aqy@1PsH{$Hf9KYlf1yYrvEM9X8&|%9ypz8!?1Ht;MW{bW zeorTnNLDOrie`Dp?C1y^5X_#9ba{5vEipp=lcIc;H2F{Zt2Ix8xP{(Dr1P_{H;q!z z%&H9|foua_A-8Wm(k-!i(nXoY*zv5hn+CGYDiI%hOeuS=hxijOl_COoBp%Guk9v$U z96)}fy(*%?zCHTqiK0hOf&^KPj(s{U>d0nL&d^Yl@UlE>uNF$YzCES;!sq;|0si_@?+$ci3ju_%?PfKe>&QddH2?}m2#Jb zM^V_dAQwbw7xMxF?&UYR@}4uLJ7r}9KdyP#7s0m5roJ}zaiHs*jQf>0U!$MUGAafq zet7KfhmkOC?4ADhLQgZd#6_7k@|2^WowIiu`eOy-0`bsVB_2mUFQ=BbS;qBd;2vXI z`iJAnIYXZogTi$EpP(38S56Y2=+U@fKLlLfua5xLIF-iI*K=3jCu-l4G0PPQ@mBu$ 
zw6LqEr|J7UXXRcU+th$jv|mYz*ptv0ZG?aO+3Ga8p;wFx`7Rj5$~4<6(OZ~@(GG`w z3+|Hk7@Wyi%%3=079>2d+wsK8Z1=!?q7?VV{nE6GzjiDhXpg>WNVn(jPS?Z596yqD`pPsJv%H`)zJ9p8!@?!bkKI|{`gTeHD zA@mJu*ft4eZ2wb2(T4`PM^|pG-#!FMV~Br3RAXep=YPaqbY`9=qNijcX1~k|NuT%} zlC4J!WwE`>EYh@d1iN-*c|yLp8+o4>o+cr~I$t*XbZ7Pig1DV>=^k&(th^?z);S+_ zijeDlgx<&5Kaw_Cf{2&dhYgxWeI8JNO7LgWgP!~4Jg%Oexm}8_4=SuX5a|{Vr0HGp z(_G6(h|kvRRSbfbhOOyNpm@k2AO2HKPgE zHB^Tr4z}xlnIc%e&edqq%L5h*Du>j-fQzcqP4rFH8#d1V{yEKQ8B{GTf+9lq@4;2p64U_n7Usd zv&6_!Q^bg;DoZeS?Fq4KYTv~|8O9Y>vROh}Lv8V_TpLHPDq^)=j7Bn>t|1c=zyrGa!fumYCh$CQebqsI=FO#a#>l$Z%dmy-U4vZyi4MLI zV`B|T+H3PK=e!QoE4jKRzIf?Wd$*0vX40<$9K@3d_)z==kvqk^sGDBiVqanl=S+tC zCt>dS-*#S0%37}QUdom3=+w}0-w>*7dXUuW87N^z~DW8-Wiu9a=lo5Fha6-AdLUY5^zn*u64I7AKhLZ^xou8?(}Bhz|k@klQ!(TGce)Yj1mo80#2O%fj=+6ZD)T z*5&Z&FuN}P7FOx-EBQG^emy>SUL}-H8z}KOfR&?IpFa(5(&46QkA&r{Q@f;<|!QP9ypUx8Av1M-* zj5=N_%XF^lU}^5BzG+p{kbM67n$i5GnAo?HUO&B$<7Qis#RoANP5QB2o`Y`p5sD6n z=s0oW(Q&RzGt+QUiB70Go8q0bv0yM#P}dN01ui;4Gu4y_V_H~)oUQpT9p|vc?5AH> z*awW)t%#=_J%`LiymL$>C@bm{sOmTi?}akA|~O{`Fzc8TuBv~BZ}KaAJ|k6AO*Cv@)AXVDQcZj@23 zc3KV|*_ulo8TnUlI{^ZhnwYjGWU=d$m8YYj7~Y}^MMLW=9%l>W^9ZF!;d5-}98S)F4skz1=Y^U4&(WFu_Q0+cJMX_I&>8-1yeGQ5%)R%cY{VY}ps%w>B z(4y;*REYQ9g&H{Jr<0v&D(y6ter~0yS*I;VDWmTTca2+V)UVQCkB+*}%h%j9d|9W) zH$82|T2DtOST)Zj25QQCObr?f#n|l~-c~AaZpBrmko4X+3h5kSj##4lAQTVQoB_uy z;YP-Z@%=4HlfBkLDT`hZl$FprnzI)bp?E``u4A3s?UnK&A~SDMsb^-pfqP6wCk>pi ztXdo)Ud!?;yFydn?~@LeRtz23`Fbb)HwK-2}h}y?`0w#P0!M4jHa*_0-Gg-ubKpQn#;z`xo`A+;qu9-=E!0_ z)AwkwV4Bng%RWk{)fX*jktrU~@CmEkLwlbn!4!Kc>v9m_Ln5Lg;~90Hk($0NET}Ti zvXS|Yh zWgtKHYJHn&rBT6@*uZ_?EQZzcj5iC*9!bLqp&F8wVc|0AlV4-;PN}xvfmLFtM zd}1{0F220w-3y(}L&(%A=`|)6N7n_8HY}t9r)Cn!Fzf03*3FXlH0)6!x!yGR5gsze{z7mXkcnq;6?qXOQreDA&6+8^OD;)M1-~kBJWGKK#P5Xw zmS@!{xksxVA`}zl$cuQcjMjW%pqU2@A+G2|elRBJB9&V^VdCTHy$j5%^fGKu4uOQr z#c?}vs<)pSk{tgMZ7LXz&&;B?m;{-Nrvxr*wrk?J0gVh^Q25_}<%krHqBE|bN~5|i z1DpDcab^Dc+RmI!sjllkIgtYXK^+kI)=ubCwTeEvb(C*}M5y9yh|(f~C;RAUrX>cK zTp-BYV<^41sQY?o7%;WA(&~gb=AA;B^s=;2!c3DPl~;l>t)r-G7b@v(p8g_8ly}q& 
z#28Om-IaIvaw#$xu*bMnorz0&LH+71{O`}8(NLTE!cAXO(_{PsMR?1_VgEjC?$4Bi z$6DD%A9c)wvRLl0bVSY2WpTZwo;4t5KV6R$M(#&m$&;MUEo;$*gcr{6nr4-QEO;Z{ zLfF{e=~J0`B^KZ?6#`Evbgn|_>=|&>*4wB93-rNjqKmvwqDoV{u-d*^Ppj^Nb*uAK z1nj9@LzGjI7qyv!)z<|~J1AB)Pe8P+ibd$>{(e9{%n*|^LC{C)GVDXz!#MO5`!zIJ ztaxu!7Ym=(h#4x2EIeySgNzxB&5MXCSmAFFGhN&Fjk-obL)sgCtGupp@`WaY;QG{+ z8|?!?;`X*ry7g|fUj?~88%IAgG6{bfcy*H;lu>t+buK$zm~M~$`9T^b(_?jj+#6=f zKSwd82xaqz!pHg|t}K_~g%27+ZLh}H*q109M&K>=8C_IG=^|bLDac!?*j-pqz@@VX zIa^$$Gier?JuYVOBi(ZP02?zcBDN*2sFL&&?C_ZRHMjt(kD;Wxk~ZG3=IQ~6P7zRwCsfOEE^hfzb1U- zCsEAi-=AP9^6;!`M_NNRRgpkn0fT7Q*s=%W#(0mc%om^SS=DE7?-}JqJ79-E{dlWY zLlCXW0;!yi1x`Z(ja9nf&9_`SEr6_$JAE4Y!6#Na;FVJ6g#w$#QNVx3Z&8dCgY-yt zsz6v<{9!GOB?q#{#;Jvdhyx!6rQ;F0;ee8k6*K7~H zEF%lnLft`d{y|N2tEUJM8uu7ksp5mb2|+wk%;_?=7GnbeCsmBqUeB1W+xu5A_<=2q zE@Fv^--T~wH>~{+cU@Z~D$H8XxGlSd9>;y74RA74yD#W0zE9iEt{)pQBms6IYU$D1 zSLihg_MG|VUN6(DQ7z2*^C0Phn#~`0&HB-aPQvJ6W0%+;^=M6to({CZ!M3}_z87E5 zQ4(UDmjJK-nmOaB=2qm?|C-m7PQX?9_|M&xDlXB>pJZQAzLpCg^$R-6M3u0B@2D3k z8;0G8iFbBXNSxM!8kq7H?O19Un_<~non0PIt9hn(7GnoNB8=#4{%rY@7Eo4cB-nwv zc%D&MNRe8)uuLvhJ4^K2(SO~`>R^UM)vg@5*(dAh@^C-2l~>L5IDh@Sq5Qtl z37_m!KZo;pwRrPQM!Qu+1?$qAg0KjraMTH#s|oEY!lEwf(Q+vZy{~R&FEKfvjUJIQ zazv$$>E4XbddqMP9;jtM`GOr}S!u0gON_1J^U7V`D1MES?5O7@+YiWm9&c63x7pIyK36lLB1=a3G_2BEoFP#!={ zLJn-4D_IN;2E1L=`jxV{R%9O-PP*(S7f;7QNV_hu5i}dGyqA`&sZ|Y!3Dn@y^0&VS zbqtO!z@#}Yft0YG(4$%^Xleil6hmf#>P**=hX%Cnx3={G@;<~5f6I7oG z;PSC~3C=E3Q~mk*E-`5LLH1uXnn3+jU~$AzhM0|a$2fU%B18KCQ6uwt3rajo22yfm zjBJ;+s|O2R_k0WLZi1vIB~Vk7cD8XceJR}vBqpeUTrAJG@s-`M2+a94m1!j@BqhXkNy*MD|UHR z?L)75fj;KKf~q(~kw;X*%{OWV-acwN^5amE-KXl{*B)F2r%LM{u@lM~i%{uGgbs*+! 
zI*&N8ehLV2xxmDC4sT(^W)ljc7FttVqbA4x!;=8U;UjXTV_^M+o;zIQtiB0Q>tLw^ zLwy2rXH#%tOurMyrj8bCf$#L8mmvlrcxD!^+Y)zfpt znAQ9hwihg8u$?c|Xu5w|RT*gK;LrmTU#)ZH?+(S6?hlBml+(ploQLtRoo6@qxcB7# zK;}EebP>1GG;mZSL?L%#Vvn`+pN+-v7OYPaaLy5iB8!C~22K+Qt-(BbSG~D3!Zosz zLal4mD|L0%K7mUAn_FuQ${?GhG2pucNb|7(^#raVN2wOlp!hWtH@T`?6|fzUs||Rt za7?m2_l*%K5Do>_ii@GyeNpOzq7tJXU-*g*0q0vz60l$?TFUI;|CO_Ppk7li9|~p3 znz`>5vRM`w?4hRt6ssDKTfoGQ2mJGpp*F!YcdR$fL+#nwiX~>!Y)>qD2&hAjV4n{R z%?!3>i-`_a=Iy0jmRLi#(x6psbAhB%T?prfjUCR+Gbx-`tG-LRW}OrOI!zdXM5q?GT;iu8NIadR9oqsGtn zuI+`yYresIb(Vr}iuCkzv|-|&)414B7u&KYADQ#>Aq zNtM4FxQx+tjd*Qc28%4j`ZbMOTyhz4acVPB#_WuaHlSm%>$MTT4#DFVOar=ir~aOR znI7zXc(;cqR5~TX(Fvs{jTh&QbhK*|?(@bt!)rMU+$x6&UM;6b3U8cM(K66ADec=T zjYf{#M8$(HChEGEI8*zc2m>op+u>%l>P~buq~nSGOzD%>hM826_@~we2HEXb+EgIC zJlAv#Y=;8+vobjrYffcAO-%AYeFmM>!&k0Vc}vDcMk)A|LimM%`deuL zk#aM1cs~H4Krgrc3Zy()Mat$yX-OIbXp^PbxSaf=Iup=e8cmrHDeZdtZGp0QvC`%m zKP)=_+~i^Xd!dD24Ghed%fdN3LMJ`cbZ>?h7D!aSFBt}dvW)HivT6LfQK`kw+BlZK z@%1~$hMhhBkqY)!FY6E&@UQgB7g|hdQ^d}hr!m#Ofk%@ZqopOxjUAMzO1}4jGx~gS75?_M#xFT68?@q6Lo| zp?{>DsmRhE*167qL{ay;{;{j3rP3Fk*;t#(ja-Fn=Cp6@%^lw7EiThHzL#GxW?<}~ zVl!8<->{o}oc<`wk%JqB?}Zn*dAeW*P$^Z-7j8*%jad)48}{NM#jfZI7n+SjneKho zr;+9EWt0qlahI`+wJ%F88v~XGP*KUgzKI+teG@*|*G~QUE@emZ{t;e2z9YLV%X{df zZD*X75&dmfpprvUj)(IvH1FJn3=V`*gn(pBMD0eIi{$I48CD${Vg}4!O96=$9H_Ee zpnNXPC*OZm=dOddyDS9C&YAPsJN~o-#>=#gC+YO~iHr9?lh(gWRD_Knq?N`@ug*+( zoFc}es$1)e;1`zAW3v%#!Bv&&pX(PlJ=lKTAS$+VwxY~7fmnP^uH*IOJoi)-a{opB z4)G%Ak&An9rGO5d4}wSbZyxcFQH5;rNDgv~4(fV1mg@F%q=dTZcBSy|&H6WLU4m>j z`)pnOQ$^0wfBD47MyDD?*COs(`xpCRtv2{pr?|V{30ZdYgnZ7^ z%fb)8^K1Ct(+w{L0B?L|lk}uJyZ&;FR!}pug4gU6WhyZ3hklx+x{vdVWa4TuEV(zb zNb+v|-=gWj0W*#7r~3hd2+FQ|7L@6vefwsO!}>ij;I}khlTnMi*1LF!2H*rLBSpT6 zD*`B&qO*Ll`3MS`ier1}FYVR{eM7pkwVulc0izCw%I?n{OLA{vWRluu5-_XM1~4y5#~RW7`GB45ngs#TO^Oyi-)w z>dwQT{FLqGJ&pAq%cktx6{)^>DQf-YuW(klcY4C#xvd8dl>Mpk{dM8*SsgejFEDfR zUziA?A6Y;B-`^Zq50meuQ`m>CyvcvxC1={ohDda+-WxKio?Jug z8(O0JHJMYw zA&|g^_~5h5r@YU3WvAr=9gA?!Y 
zjF3zDb@vPlo^aoUg(Um#^eO!JzDbA*t1W=&6LB2i2HQtPQqsjStafMg9)O=2=t*mW zS{PJJ){RI?f(pNvqW5=U$DZ8+U?w?WR8+)u{o)3KqjL=no4*2pRD%S47r4fQD)(;+ zrkf~ue2!n|o&4T6y@4XQw7Il%6-X$)DkpyP;rD%I(30RY`LFdWs}(FmBlC%c_Y{j` zTrdDXC1=2D)vVC$-R9%1zIpoa+s#Q}o)(|$U246SM&9QH)3Ia&v=Xp|A#mKFAukV) ztudInZA7U(({oHeiY2YFrq^1K7&fy0!jhUvS34P#s-t%9apJCG`uxhl^`6rMT;Q+S;i) z8Lkt84$-l(*?%0OSwQ6^=WX2U?&#>>&1r!kYuGU69dX6l4h|^5amzvPIo!fXhLWs# z&Bd&NG7&0z#LX?SOwG^A>G$Ok0S&CXjWlx-5xRQ)9IZMI5ZZV0x51cwe4brwHMsQG ztM0WpAtlac&h5Rc96x3WX0YZlpaZr^@!4a~%Ivx=K;4z9{8DcadEzMt@9H-Gv!nXh z^4rRy;oixFh~FeE@*w#2BXfuQ=NUR)R;`WJ>w)x21MV}rK0eg|kT1JzK!o7WfXt=t zGH?U>tRA@kb#9e-9Jz}HDR3KIWUdc|7MQwL8?#8m2KM)k4gf4IPYN8b=E2r# zY+%Bakds+;+c@4>bg6pk({Wrkf~<#R5-ENyc{gc27X2;~Ag8p#obgtoHk)cxC@#W| z?CW7JZL66EC-k*?%7kbAK03}C69JB%ITZ)B;{oR z{U8qj*cq?&MyEeJUcUQM5ZsZ7c8^SN6c?Zi^)p{0$UIP=-1cF?bAyTWy{SS!BTvXY zVj*%9{HY%e8`+E0;xYgZX9fVJYpZbi*NbE-wumjB^IhiK_^VyZRcKKSz2&vB%OkhS z9AmN-E~U~kB%@*MuG)C3$lecuhrUTffNERQdH;yS;pKt?{-g)D+&W3!X5M2rzhYi_ zJet*RCVx+G>t!q24LVA3ERphF_5UC>UD)zj7U0p#Q<~#zHt7u)Urbu2MQKU_{0mq{ zxA>tD>RM)J3AtO=?W%Pq5#y762|RRc0fonQR`nemcu4h(5>T-3#RcR=R$=bndL|PB z70-Hu$(~-TQu7aSyLM84WpPVGO=c!JJ|2Zxt5Te84L6jynEm_VoUHDX;@RLWYue5A zD7^P+u+I1HrHQ$YrD8FaJQq&z$|D*!H){spWT&QTFETE^n+CYhXV0AZ983e|A(y!E z?p9h)01hp>7P#~jUl#!fy95SsjMR|S^5eOVIDj%7Im~5bH4`xAE{g*d)KGAKkjT?i zSp~#}Lr>jja7H7fN%annD_-Psv_*e;Q_&!Mq)c9-*a#>-fCqi&Tn(LrjV-Qe=qb)v z#8G+QgoqCdb)o}rs>BbIfOa6Yv%Qy%dgrdCs@f-uA1eefz77CGk{F)TEv{kFngo6! zdUX}nTqXs6^$j+bpoZWA%UNJfOd2hB0>D|L9Tzg#@KP4bjh2E|VWkf}I3W3k#k$?e ztvu}PilE+k34=deXtcWe^n>mqTj5sd86uaS943qnAqGTw0C0^l{G zzjvB0taUy{Q`c7G$5wv90w%d7Mcof5AOMb&>V4LKPFcuZyk#NQ)U6jVdQrQsM5Z}F zh|Xnjh!e6~KGzq$>6=WDE1sP&BS<%b2_Fu+8O|oI={zEgT_n?ThtRuomF@^@$nyrM4X33O=`CJWcSzE zk;ARxG{5M zQFue>KXl^DzSu7gw`i@e-u8})9~>2G`SL}j<;#V^M=XrG)(R0Uy1u@#>ef|5s>_W3 zXiKScQ~%a7{?GJ?|G=hA22Q}=D#nK!phW5Sr$->d_4~ttZcsh=`_mL7C?WcN{SUnC zVk~VN0Ke4KlG%8ao^T5E^Y;7=iTg0Rfz#BUVKroui!+#IuL&M3A zr{AVqF6dc3>WCGn@!OWA(Qj?oGJDi}=U;Q1l0;mYtkd@b0FDsZ^`{YKc6w<~=F&(? 
zNw6VffW3IRlttMxp!#-{#T%&Aes|YyeDDdzbc|cA1}A40oui>^MAn#CL$HWizW7t9 z?y5+xd4?-%*0U5uQKUTicA-g@L*GlmB`Rx7N!qw!{%HDHDZd)nd2fY%Uijix3z^-j zG_!2x0Pt%dsG~WXUkk98YVZ@auujpYfqZW_swzfhn5~*D<)HVJfN6tS5KT&cS(ztk zuUXy+^i2}|?z1pW1JFECz21{qt*tUB&=6P5{$Cn94PP2<+s8^W6ygwgRD(F4-bv{0 zP;-o)o10tQoHWm7cDsN|Prowm-ino??0*D^CFP*9r7@spe{*uF!gS64fv_%;>}yyd z+{fg#wGh_b0mECqU>YdWk|#mu$(tA9`x{&Y0H^Boaer@V7?sf_NpudcbI<%VUBc?~ z?H+Dv+!Ps&e47Wb_kP&LpfOdF3$;rzwsO2^bJIhXl(WIhBQW@U8lA5k$;t%V(g5hz zn&FFUHK1OW(B9#@9vD&vD08&-C?jAeOUk@1K z77P2fr^D!RlN1uglju^}Q6x4V$iJ7Z+sZofSbtW<5`NzKK`rnye4p<|1CHYfWRnx9 zPH*p6&g%y0OEcYis`kRu0j<#elq#oIIJkcV5t<0N^s2g1WOcFo6Nd zMj7Sf=dU3-;P`PVAZxam;COp;uRWW$g!X}>XzA^?gWaPzfDE}*)SDsdHw63*g`8>) zea&%Zd1wni4NT=)PjMGp!I7==B1@g00DG%H}QiG=&? z3)Sz=pVzmr$Ob6H`7I(qilgK0PWDA2{QaZs(WN#W`o_j-@i**1(++?`Rxe`ng4K8T zvPS&odDz+n0GDSQW4A>-*8;6`T3ck&i_B|%$riuQxqY>|t$|bP1$K6JMs_RcEX-Qv z^&gU6DvOK8IJsldtxx~4n_j)$`?rPlR_uS6b^X7}C;$JK{ilumpR!GI;c2FjQiY3O z@44SQ=Rin2+pAY)P#qm5-CrG{z;_@1*rWG4s;q>B>gsZ|pKRP`oa(9d2R*cq zxYu*}eg^;EKACLmUkv_vv_1E~zx*E%i~qUY6#BOv?%yQtNk-@`+gqtyjs8nOUv~!s zW>;>xkd>X?QR-i=^&gIffDi1ZE*Jw3567goUv00bvv^MU1HiR^DcxQCmxF>v$@W+H z5!~^=xg`FR-2d0AMCzUPwzk(cqQ%9{E*RF=3sg|P6LcIRAsV?-=K7JGYCFu6Ssgk|(yj$^MCT#^yvfk> zDLB7@AFj2M->*qTfi5Nt?9~+qfSEwc)nhK?|jG> zL~UE5;p0#mAy1Y7kA^C<`eYV0lBHl29dFS8-W-_7L;L%C$V{qTrU+_GMn)t-Q<&JJ zCoZPq$+(7dh`lVT;|$Co<=j`x)Rh%y4L?|3GGg^eJ%IYg4ef2Eq@X&kHJZ7MezOMx z&X>G*z6|Ig^?O6isu~(GH4w_9Ss2OoOEXN<)I5?^R>XDR2&e4N~Olsn+vbIZ_Z43=KvhFe}u`n&A{er)Np=-}vgo0)1|wnVUSL8RVU%F{J^ z<(k;pIepK$u^fhrm#0*JhwsD#8Je>^uYemWPoy!4m3%+H(8BDdO(Eyl12-WZ@i8^) zN#USzs4JDg2Skqi(Wu3PLIK+MQ3WQl%ynEUD>HK;Erd6qo)K`%Il$rvuF9MjYO0{^s~&LEqF^i>oScgb z8IeOTV)$e&<#B_ilOC-0#^4;50b>xiG?+hxr&8>3cChphJ3*nZSa7#I0RFL<69THE z-^%!ue(&s{sw@K)*i*j#UuUTfpR#WqoJ=D2uQV4YEy1w^9{S$Tv<&68olF8IWh$Vv zz|^4zv=*pbmRdI3!3i^3omnRWL|F^jbp$W`3A(8tZD`-0-3j2oO_TbE zY+J(U%YAW7g}@VRmjbWQl*Khdbu4u3Oos|A*xN|spGH?dtEsByZIPBuTl$cvZES5F 
zKYb20X>_fR^{>9&e5ctlCN?`+);AZn%s=kER!`M{0oQs;q`6Fi?&#|}^N{)zs*(mgMq8xOv1|?IJyHPJ%$O~lo@Kp9qEy@s zNzT2;9i?`7);X`QgEY!$|0|J^7E#Is+gu@zYcg(|?r7h0WRjV{MOrb|hrv2)-`z*^rTO`tw zajln2P|i4TY19HQk-3{`I;}Xy-LWvE0blH0fA*{@5@5$)hVTjsqPMymFF~ZHpEe*f z0fYPe9RnC*4XooOo%HLSr%m@DNckx}ujLh{ElF6JU9S_#H@|m`FO`#@l=IN;Rdx=B)B-RW7TTF{z7MKBzU$k+ARQsqev(+yf*?X%?XK= z6;oP97e;DWnwez=kO&YY6*n|JJ)H@6hry!3gf+#)GKodXvj>30dtytR03f#?Y2&T@ z(hzVNWW8+A_AxdN_dRJlx)dg~)RPtjS~fp?Trq&4v4F2xTGn$VFBDaZxkP9~+VGo1 z5|hM(RD(hjlnbU3yvW<0%`~UFnx))np6o4RQ0a%i2GIo|w)mHKV9m|V$jt4qZK<44 zGyzpSL5O{SxdRD;9pkmjmR!_%^+4BuLM>}|PShU(n4*Z~Z4fs3!X7!0e&;2yRW!|g z7qdVt>wSde&V_JLsXq6utQTO{LoFG#E6a>nwjNg)POi$4jKIiSld^Z&o4#;CN^A> zCN=Y%T?8-xaM`6JZ~(B?LE_$gN`6U+c2f~c9-yrQxQA+4scLHWKG{271{{{4ZO5NU z{+n8!bNUEv3kyT|bt|QM7vqwLqH!7eJd2a^wwy{FxI+VS8)W41ptgwTh&9_S_k&qi3k(UQ?O> z-Z3~pVddrK?op(dD_8-RHg*RS{T6vT-)e2*VEy|3*3tw?nMu+kvqG6wix)K^HBYuH z0?4X&%pXi20gm>KrP16YTop396a<5HAZj&)`DsS|2TFnfBGns>};(0=U*Dbh-C^2&>iEf zMc2Y^`rFz&@aUHWp#B!9X8#|0ivCk2{vX!AcV!F5{twYMSx8^?d{X{ax4dPG3LAX) zodw9a@$LABSRz`nbdk=brZx~%A7|$G!n1tAVp?sPDw3h1{^yYO44Ls8W?a9i-|Dwdk05PIf(~ zy#`%fr04i^9bN$e;2%gnnk1XQbF%kz5Q)TVB31pbKHJ_%XS#Gc}4V6zMTkin_JMca-dStqy;BeEphQQi7tk zd|RLx?w%$=4BF=K3YWDe(hy9ikSfQj*`;TK(UI_^=8|Q?u3jxBvbEx6JAUC~Ppc zxGun9^zdOG$X9Md+;W6w0lOa1Ksj;zhP?8e@}OAb-6{#^k)qGvCym=^Eoa+}lJMWN zn5|M}JPd}sHf|3!9lSa=>*ZJqED{@-a3#s9s3fVEFRypZYVzLRI~ zdxA=lw$08VAWw@aNCabeuuqBu3?UGwBH3gcW^E@vDHrV_`cBw1w9?B=g^PlM$l}Ho z&p%z&`j}@{>)Ud1YM|gh`^xxC*XoBeCtfitI;E`EStFp(W4m?>hGeM2+SG%{*WH{R zZkmzmJsJ8~4n#K{<^|}p21aBARL|PS)S%cb)3$hD)1pzXAb6q}yjngXp+S(G24uu! 
z)lAtA_r#5IqS1Uf=Ch$56SaiAfKX3B*?uBpCeUSt0AKs>udTrVJ~Q%YLZ|xoFB<)` zB_I%*Iip)TP*9En&H_h2L9Z|LDR$!**u^U3?A9iQfx?;~-Z~zH3jp#Vk*F!V83&4L zwSuBrz@31Bf1=VN^lnsLbtB^2zyUTN*_QSa3}t8$^0sBExT#3ZU2;EBcJ$3u+}w!!sYNs-OK> zffTLQeB+DMcyN7s(45I#j|qC~iAfv>??u-OR2p#ujVF~?pI;Mr^WDXFQf-fI?+8paVnx>PgapL1;D*0+MU)Un zNsqP#^gE_v95AHiMqT4Vowz;2DhCtS>AKJ8zfbjDk+7O8E2oP7QnQPl#DI%kymTa&3Y9EWxUNIbKpdN{L_K5RqxJoPT4g3wL?xSED`vjWO!e%w!KO)GW)CmoO5nHv9k2Tpj8Eop;Ma?s z($EsF%w~xR*Ie<&W)Ua$jfS1xp0VW*d>9j|H`p$_YYsr;{*JtWOjWI{rYTF6D$bHx zGg|em-9?T&tGx)8T`s#qqzIb6c)1kob>YC##;W^)I+PxxZ5ZX}e&EE!vz~Z8TExq& z?OLg`m!iDE^v8^%-Uo%T+i+s60liG6#yh^3I#!nKX6M{5oRKa#qfXD!gvF_Bk>YN8 z9e^J;g|1Xc@SFwyQaz;1=&p*Aasf_sSJ<9M)Y`r2ZGJdzRZ9KrL#o?R+*g7!(`5Dj4Bp zh;_6ZKn~@JOkb1!d+1rgXB~Alod|*m)bdJzl(7M9MW?*98QZ>b3oD=-VdPtgCaxKLLxGRBlOT<78 zzH2V-w+;M>e)qH253cA&vc7TTY>H$}SJ{^?2?!Xtfp#2{o2A9{SUf<1Xad>Q^sZ7W zFBU{~a}WHON%eb&6+T2cYWwfNAb{z}smz0Og%sqC&}{wczof46NjT8vYY>%|v$Cx2 zarMjxE^OSX`*A{9A7oH|LU`|SeL1klmCm+DcoMiKz8<(LT4!G6$sp6(@@0%m3s0Rf z6?hs|$P;*_g_SpH*yU-&%~0wjisfx3R(lb@OWF1;YKSOGz^& zG2aw;c^}__#%T%US6X>JlxA9UoEyJl;#lfmLf7$PSPflpG4%Utiypy4|GU^lwJ%jc zoK{av8mx4_vQG$qi{r@TLi-(S(!5^DLsE|Z%WN2~zWSyfy7EyqG4s9^{3Z~bY3fvCLgt&sJhN8(zS||RP0$eMQ8`DE6WbJV=TWtEmWAxer)t*bTqPGZ#F)L%XxvAF%$46CJIRE#ky9pgyr6 zL;Le#=66?JN-!z~k;#S5vYCvV74>(4m(3mhRhO5t2f(|z+>vb}5MfaalSp(*EuoJ1 zyz<_ui__6VSjL4|cR{)_$`O~@W~uk^yuq`=ZMe4i*9T4^K?l@7uav;Xiwi>7b_6sO z(&-Y5cfARsLGHQRTba(1q3-T*MPyKTAniN2M)hc54;F6FFceK^l-q+-TNN|$dNaV$ zNhH9k9oeF-crRjd^#XFw+&N$K?YUUKiFn=;`UzD<;*{Y%2n%>M~u^X9#@ej&oN=&0o?B|JMiAe*sAir(Q*@JeHDFZDc=hwT(zG=C6ALuOT)v@ zH|lVGI_^sh-J|97VNNH`e_h!ySz^VdWl5-ONT{pD_d*2;9#F8ec*R>Z%{(2ZbnV_f zQ2&Zu&B^(JO1+c4+S2`4%6AT#`A(fM9dJ`^z|@pJ+9mF0!|LKFDtzTNC{dm^2*-~9 zT!?tE{2I|Um-fY7+tM&~IC8xFy!I1W*OS_oEavjR-o6@wnS|cOO=U9ja3BAyeet>Q zy)Yhv6IN#g=@+dfw#HiP%_9>Yvha*p?&)~Fr{gQf?COLD9od-}%SYK!hel~hf^!kP zJ58Nu*2asy-*U%Qh!$aO%V#taZ4J0=ZSB3HGdE3MtvBu1N`jS|7Eqlv_@0#l5^?w* zlZ7>hVu~4U>pg|j2zMGwc*Dl&?_cG0?Wj<}-5#STe)C=DL4M}vUYj6$*Di-296!gy 
z7}Sm(&nBVJoeDuN6$RmPIkj1j3f`d%M{X);L%MsH1w9ccsUpf1lkf-AtLlW#)^NSd zeknzxpUs}}m#SoRz3MJbya(pE5E*o9pV=?A3@GEx+BA`lK1`q;DAg407it+jjeT>Y zcY8^6ptj7sP%=c$j}~H+Ke=~Z9C#IH3aJfIAMmNUiyj)7%$DnD4Uef024FTJtnp-K zX>xURqYuB^?iy!!wTk=j*J#S7F(3Tn@iFYj$wY?KGTBu8q5mwY=MJSVezd*xw&bCB z_cP;OVy7t-^7A<8WN-zuhb@2YZztQ&$%iQ7d3PO_buiu(*(rB}w?~sAxSk4Ja!O-( zsn%Sl%D|32sCD@t93DJt+ltr0K7Zg#el(OGoe)RMBB5uUx=DRpnM1928hx(^IFx3% zb;%mer3cl8M*|JSq83!-14u&}lHpUa(|e}&xA1D@p+?s+v{j2_fWiCUQ0m|ZQ`T=w zo`jsG1~fsLTj`%Vd!T;DK;XOycCj9c$m)1u^H04xgtJKv?-TM=x-RhvTq`KA@x4{*JV7VsK74(|Q&m)9 zTNPKc5!op{=Rn$hNo{F#SM*tXH_L&yE);*TPxO-6XummH+ZJ$u8V@w#ZfL05jWVkP z%c*kzxmH2WJz))ZH%P|Fh8Ra;4AvMm?pauBV!iOb!F*9$`#{aeD3OmJ=9^-!<`Pya zQf6ssCpclKt`3E>+)xLOK&`-=6=)|5Axq95xFXmj4S-vCTy)wefIRzwlL`L?s)aC~ z^tT;2!b$v3N?PDODZe$GDYFwB1AIiYoP2z$7F!Dk;HG{goF?beJ=IHY?Kjqps$%~`L_n=1 z!NhiK;CK=WnfV>5=I+Mj(h2A0+!ECNJhcxhoBV(YY12XJt|}Beqj&BC28Ha)BIf=3 zaa?TdvScb}?%<9+{{+ItZCR)lP`vrNxmti6b%ACX*uwg6NCIam_WYd<#=N5TT36sD z`u~){;7#L4OWWqRVS51uu<&?lm7u2VWf{Z2qg%uzZ z24NL`xGo&`(%5)5Ga;&%QrrLS^G^88l`X-(U+2BL&O;k7!XDN=nweP@MOt^HWCJX3 zJ6^M+-lZC)nUEF@?=X=l%==O?xK5cU?G}Hv_ImzZqg<_2dSv~{l%ctemW!E_lTRa9 zcNED2*2PZoJmjDpD*@$wvqI_v1_ZjNnqk=-Mq{e_0Yj$;-7t;=LxY3KT`fN9n3Tw2zQ;B#XNz5>y1u_Ocm-_e<}N5OKA|gbBYOv~6ykh`z3QBA;3nDH8K^M%HV& zd%nkO=%d8NGj@C1xr^o(Axbp4_>8v=Xr28M|b0A7( z(<5XWIuw;0gi((f=l}QOkV8GTx^J`)@5{cjAg=&e13ME^#V7}*-=eK)FkStw)gwerva)B zR>%{0Ep^bZj1jVUCv{EoQEX0vbu8Zx=ZZgke=x(d!yBUTwj?DaiCii<_=J6ot$Sw+ zudltG)p2<=*<($@N{865&H5^U$Up!6CNOiXR>xYNVCpk2a7@!BBv4av3>1G5&gpYi&Kq? zGEV7hB8_bE4=hEF3#3Q7j5d^~ma^$vb*DT$xUSFL!x)V&#}_Zg+@^}i&)yJ6<|C&! zOrTa=Oh)n5mm+S6#9QtZHCKt03V2yDYk;p|;nCq%Ef0Z%kOngIezu_bg6PSz?BtJP^Aq~&P zgm>|@Sp)h?B#^VM6!cXjp1Tbldy2n$?B2{yVcbfqg;jH>`DyVa-$PByj@2T9a4lENY<@6511_l`S zJ45fMq+D6)j1r>h=PQWrO)i%AESY6RS|i~m4No7x@pqYMV#EpR+ue1I2=J4-uu|U> z;3GA7Q2(vK@qv^>+vz)z#~m>8TBk5n`sxP$Ua6&yDhlYC#*WglkGldbs_|BLkPAIs zH>ZndoHhhCbS#-I3KmZpgv7?4pXj?5XT`ydqN>2HlogCVO=*6^yXvWGFfGbQhly-? 
zSkhH8^WI-8;O;M-g^wGfjpeDUCF}b<D#XN1gKM)1`jKN@&spGIW>L%p(75%&}vFm#r1nmV_t6gQ0J+1N3>U6W+ za|-%8^`&D5r;*5VKS%w)5d&L9;Gq(4web*Yk zb!y~lTl*N1Xc5nK{_+by(bF5g3YR{#B)WA;CyIK>ru(hR`&6i#`eIm`^JJO2^Mpo_ z@jmO%3Ux@yw&aCuf?U7!`pTy=;r6PJ1A554~LteWWwJEn34SI_=3)`kBa;5Ien|PvG)Hrh?&!okor~Am_LlSazhtH+ zAfBRq_fD5e{m#HNX*VgjM#5Q$}(nd3q#NL50jQ>guI;ONgx0bx5s z0W9eEa?eew0uPKa?UC+&V~aO3Vtmgm7p;=23serY3Zx8Ry|3bAN;CDcPx4)$`Y17E z*Yb|WR4k=OOfutX%51{JYuDE6OTSZ^dX`?vyxMb9H=(fStF5JyX2`(b0%3@z>sKg^ zLe8Y$e8iLDC<=pLYwNZw7IFZoIq=bWSm{BFzj; z;W7NoT-Ps9B_U~#omhOe?k)H0gY+{qPla! z{1%5Ul@Zl(K@6o$58aN<2O$|x1`QPRonuLV?R-85;Y2kM)$z7S zQ7B>Hx|HL^5~Ell6u;b;)yN_fcyn;ItjJG#BU;mt{@Pqa9*x3f$8@XK-xc+gJFBX- zLJ3fvdXG>{!SLhiHY-&pWhXfhbb3I0u^{ZEd(BSElgJbJgRgfwr)PZBarV^UBT32px zC73v&^mNT~5vyBfKlJsDGZ8{{!=7XB9KLqgJm#v3pcP+M))d`)bG_rY#Ff3&w^?uQ zC2}M@l2Fl-H`9n0Frv`-G%wNk&MD|S#<1^9(CWLtyzlY*eoGwqI~K_-G&>(RqM;JS zR$iV8Do*qE4(quYt}XO`d|`Fx>Ar-BGEEah{J`^12K6$|4UxH5RCJ>cSWa$oF8O9s zQ#hcN8e>-o0&Sh0gWq1AbQBczUsD!?T%z%<&a(L`=j-&#Q#>W;qQUaT+wYdAIB}I{ zG&tJsYk~w=D`Ky5>g%X#^&5tskqYd`c{WrbLk{1<_I6_k^VXHEa0XNaW2Y6x{& zmg<3|rL8v>&nkt)MG3N)72o6c54XgO``vxblF)TG{A7S4>0ozx}7O*bZ z|EPOX+nCWnM%*h|j3}4QV(RW6Oj=C zYUu{?L>cHcIEsW8Z+t29#pA9dG-a1gQpM35+E_e^=8JT>e3u>V$3k|hQQmq(2C`wwFsop&E?%g1{`0V?K@HUc{{jdARlJp6wLI1; zplA$IWr1n%cwk_ z*tu{2g_1rI+{XbL0KmD%Gow8?-!N#`OaNq8o$BjTxmQ~qc!$;G#2=%)yJk|5yOp)~ zPJr-g{$vNYi*0?``$BQAXG!~WOlc!Dkld+vUce+?o6~f$$9pKd&u7MzAimW;YF(Hc ztX_KM;=uIhbG|0R0vvONQl)P>6%&^e*}gdFDv?UZn8$?`HYt@QL}n?EtT+xX@--7r}nD9Z}Iw16g4Mx1Q=_ zi(DH0p7mD+g?6VD#(lF53qAkR({XAju{vxUhFXjh1l^vR)FHr?IoSBbH^auVD*9<5 zD@^3)@7rSinf-0e9fN|*WAK%dxiUr9{5Aknh)Ly~p zUVT%ZI98axRRlc|<*N-HH)aecq#s71JvTErut>g&RWJ@|v)QPcn<0uzMp>&JQ?h6W z=g3(bC$pq9A8awL#9+F-o9MIsc~=rilHclhvR3i(M7s51MZ#d}`ho=1{GxDFPp{7s z1~ESBYui)G9I(=OLB6^oM|0^!@YXx9+uW}!mtmQ^+h9#*1q!({O%r1Gk|##|)UO{3 z-al`-`a4(po?R^9FtjYR+4&{`BkoArm(N?9ijcVY{_zFyn6JqHU^`;qL`b8crynt3 zg&>%~Inn)XOjcBLW1TdIa7Y+m0M<*Hg16JF4%p%Qvk5}hRU7Ko-7r(}WHlTJLk8qb 
zVWZ*}?E8A@PmMBuXkRwV)q9ng0Fk*w{;(I*XE&OcZXi78D&^;Hcr~7Kyf$p*R(P}; zi@|Q}(Ko-8yJs%w+~-T1{$Zc6!)MaQDkr6@xc{h@A>e)?UEJ1j8H%sAaBoR~16|$L zd&SW|$DXN}xf`*^vFGbG^te?I@c;QhC6#I6!oIrlP;t-EFW!<33}6&VvZ8|H9gtuH z^8RM~5nG}r^Wcfy+_(FM2_AlDA}XKcNubM{Uc?_V5(sC%A=U?TMPgQ_{o>=A($2V| z{Gq&?W}97>-t0n zf&N-tDcU!MJp-{RkvOdiTxV54#m}~3TwjiMAMW6{AL&;VddON~Be5)pQt30XpDSHr zC4cCbPT4@)7Hj_<-qZlTapOg$w!;sXInf6Fv%~_PxYs1Rj>kKnzE)PK!Fkeq5^%sa z=b9*?nJQ7e+Zv5cfw)mvvQzbnXw?A*?9mjz&6cH{0ubDmt9cTl>b9;Uu6XdsT4mNo z_m@zz$tZ)cro4KE9i$ML+jxe79&HK=rO zKjd&a-IbCj#xm`3odZ2=+3N{I4b@~?7ar`CI@xC>Z3Y`-cx6 zSX%D!pk$ZeJQBW9RMhTf!y!Y}CSr9lcbxS_p!Pky-{fZ;ovsSbtF6@X;@z6$o?q_x zz^2Mi6LfT>@xatox%RVo;*Ru}<=60?-R#k-JgIR_+qm&sKk*$BEP_M+w{&{lf&bkU zjr~ zCyD#T8^9iA;|{uBp=WGn~c9|9KC)=PI?EF8@I5asDR~yIZrseY40{l@TkUu3dx{a9;glFY`Z*Mk|bwU znlmA0s<^YJZM-&gESRJN&BnmhdK?vn>^DmTTz%D{>YgZ}i+@5~ARn%I7Rwa1Df*T8 zo?R}2*3PUsbxQlezZSCxX2--A*^`d1K)MZ5D$Rqln%EEG>UJWW7t==EHs?8`>6}url4rHZ1S3DE6 zgO{JbkD8iczNaaWw+%|QPObmQ|2|YJIZ#H{bGe^dcwZmA-JrbgD@Wc-1qB6%p+mcM zX}*hIG7pTeM&)v>G6P@HDR$C*qgs)2*BLvIbwmgB-=Zn92ZSwgn7;$jt0aU#;SQ$x zn375JqTPH!Ps{GEu(4vj3x1Q)cIe$Js=>5?9I3uP`_4Q8-MgU{F3eBI9fNk zpD>9L-(95PU9Z4o$v|vwkWGll_707Ky_ZP_!d@+c_2r*3R7=mCG(J=~gWbwY9_L=2 zj<6})ot+u7=?q6m6w*SB$(RH|7t`eJQ99bYMfzqSI3!pdQ?=F7{Vk%2)$Y&+hji+1 zhRLvD^mCpVZi1O+5G)*sFytz9V`hX?+9;j^3Zm={!k~MHunmbs;+dYYu?UQ&6g;$p zg4vL@C~eNGw4eZ}ZOrcM98sJ{xLkAbSaAGhU}>y;#q&eU9nJ4Ia6c=IXA!w!!tK|l z?^Cmh@;U6e5@zK77hSq_*U>Ea_kqo%>vC!VzJDKsbSr!jKNjo>*2j{yiZ9f(%6sv# zqe{NaA4lpZ6eZC~# zC4)vx$Qi$(L0)=Sy$eCk^6EaI|@!vo^a@4($ge|9*qx z2HQT9yn9Jl`PcQO7z{O=Lckvg}K@wcMmCoQy4~nmezt5;a zo_FUhov=UHt?xL&8B^QqXvky#3oU)|utTa`EYzO1-@{WR^6dkkM9j`+Bh^eH&t=HN z;%Kooi})yVKY3!jZ<$z@!@VTzoX(tg_~6$!E8BIh#}{8NO3L)DRUvV-1>c>48LSpL zbR3ea3@{d}Ys1`K`wn~Aq<~@-o`uB+X7oj;kuFC@ZTBUIYu9v~Ogf+fc|9pi?a;se zRwOq{0p4YE^77y(7v})T6mSB7qx( zIYGoi!CIF3`K~LncQnY!d8_t?QAo&^i~T~*eHPRqID)LSjEk$pdOR@n=u`(EuycH% zil=MG7m^`OZuwazoTZ48C9>IkeD6%NQJ=r>FYK;_(~%SHXeQ0^J4qkr_FyI&`~m?L 
zm1gN_-v!hlj!~f9VL+Z0p3^;Hw}J6X3JAh0CG_f^fsHgAbsVBP6I!VEU~=f|m_p>x z=A%#MW!71;X0#B0R&bhp>G{zWHJCKynX*Ny&fP%-k3=6%v5lqfFQ=Jtxkn-?u-qK2 z-8r=V<^YRM3hWd_A%xs&*3j=u>$_tzi1Mrv0?%xPzA698H3>|`5t6jQ4bCyz-WosE zw6vq~Bx1EL5!k6VCgibRrBjqtw>QTGjuNaj3AW#=I}{BMQ4u5z=G}M0V==K{l_V=` zHdOYWRAqg)U}_Vh6w5PC0}0-0oQo6bHTJ5#+c~{oFc$dGw!qMMP8SEeaia+&i@(Ju zRK9HYCuSD{5xUMo^)43J|5QO`V?DlRM#hEU2rUFhP+nF18Xu&S6D)T2jo+rJVq6%` zHoK8;AD>)Y&mJm(SKVTs*o(6wMkemg@V6g1M0{P)PZ%t))J_aBON6Dios}^jD%%O{ zNKGsVHOuovT!MTcA4YWM+qd7HjsLj2V6KJqP7F(D&*qd>_pnrncqRBpg@Y(v8Ae(b zD@YSuS9a#Vye#KvS~RNptQEB1CDgjOIMgHZOzqfnhAx0e@6JwV+nT*VH|&%NE%Ft2 z6>`dINev$z>|D@@a@hm9mFWj^9A}c^@t8OIr2_Rx z5ejz)fQ=z)3dQ(JdtH1lbd*6@bZ?OY%(`AV{pMNZ(SZ^YWwrR_~}>LhKt>6 z9}4UQF#FlL7k9C>rmW7g!IAYPOwf=g^PRHjd<|FO=(g+l@!o9TePL=YH8R=)UIha6 zAI5z*96_X@hV{LZklc>M#2lX;LA1WTPJv;4@nW%L5UTf^!QmNu`N4qxaYPXI9AJY! zAZyB+N__1o^x(%$6TGzYNd7o=aNAQ#<&;Dp-1-9U7&5}^!v-@w3JKOerzEkxpWXYo~oXNHF z3_SzGVq5|vsBgNREo`&cZ}0ILaSw`*GBnrs(2rZ)Ze$~esem}Ra-R(b`ORrvDA~k;@ZW0b-ER#7Y=I#NY_x-l>r(t*<2h36=%>9g5&Ut5*7O;`eJj z)|}J}FVKg&*NpWCg=_ReJ>=F;sk%oV=gI^*QxceE9P(!6WzD&{o5Tb6xC_=K&+Hi* z>KD4@c8XYvCGWc^m_t_+Nx>ik+zA@yEc;xu!b-PUb?7C(y&HnS>lP#_1u14qpLHc= zKE9r(aw&idS<^H%&=t7rmY9uMMOap-r4ZpZ|_vHYRy%&gB)|jX}P&fuvdO4py z(+hyU{F+@GB{X3Snn@otvHbYxOb`At<<~ag>>gh#yvjT9qgos$3gA6^P&qKqr@jYX z?vcLqVXxDxt`NkXf)cmv10akdsl%eh&c%1_n=+6!Dl01&F{t7|QgRcwnr*)AVqZ0? 
zw2Wxm@Gh<(|Fm-|ExW=6lYL++9((ayzEJVe{wLRvIh$YK-++MgfufNzgxw5Zz(LR@ zxs?e~%jORZdjpK|!1<-I`t;jZw-*SY5+Q8Q!;I$A80^kY*?R508tBd#JcPfizd7E*p{TgK6lb|lcKFB zldR%iB-ew3)!Sa52t+{kW%Fd)_f)Hx#7-nn_T+VG_9vrF7oN9G+8^vwT6evsK6{7= z?@Sya!mzME<9_;ig6{pAnUXxwV`$aNZxS18u!DV|tnFX;ejmTM?=6{KU)|kg-%~@K zJ;6q;y0ti<;o~EUC*c&Yp)fE2gBP;V=pu4NyQno%L_|c-+`M>gs>!yeu($?A?D zC*kn0m|R2McW1^>&vWv%6A2IFYZmSXf!po}l!T=Q#)F*u*Ygwtn6ES&fvBDY=x6Y4 zA-u}COYxuvUOCGg!76g>bal14I`PqTXLC>E zOKOX$L#jSF(h8SUzeva)Pr|=>KsA_%jI&RQAFM%YKI=|g`@L?bfrG;a=5lxOlbG_z zeu@l|o51Bt{~gx|W{;9I|3$A-fd%t}-GnV1tvSWo_m{_g@g?T1rCBfcZqYNc%USF$ z09dOlT(fORnq;JHSz5E%S*Hget#@lh7U8}?^j*xKSO6@JTv`a`%IjMkq-_;NJ%i$V zAkU#OzhP1A(9*zgi3NT-M!Pv-vQJc7QSy^+k3246*k(_@$I5P zGTictXr-N<bh8wrUDU^qcL{gDoC5{s_lbT7^crMF;w1at z&6pbC$^_!@i@WxkAQc&7F$p;nXanrzUZtojViK3k=5Uk%3(|_$ZZRC9E8v>TR0z^yws{GN>5& za;qU#*tIUPIvqif?k$PhN|UJ^+PGhjfBbaD4K8kJXI&dvdc+196;`oUMKaQoChALu z*wde!zP#4nqS4p2uQT$wX8b}_h&k5WdEYa;{s=WfCaskNqg}11Z8pCTR8;HraLlsDmRf9XuHl@NU21;t#n@LR-&w^>BhQ=XZmjYH`BP-$$@CL?EgHErrD7de=@78 zvh)!(Tnu1Y6VnG$JKxTmG>kxlRNbrAE@Hc7G|{ z1?|e(FNwQms8E)UMGk(;^^H5l0moio~2(lB`53SLR z)FULu${*A)7+mG$x-|Ks^SC?(LbzxJB7@kU^0ocBZC2XeH|N3>U*|KzLBfriRZSwx z5)y&4k@&&bGxI}lDM;|yX0u=&=#e?-(fpU) z@cB=`sRB9YEMtp-oYk5rc;OH6y9$Nj8p4&&AQy$fOn5AaUTsRa|(w?|Z z_>2CGT0)d+?>pG$pM4C#W=4IIJMm)eWfvy?lp1>e^G3Cc!?pDe#V;2_(OzdAd_KO^L;^+6$PmdE^H||Oq1a2Ea7b4?mGXnnfKXH4f$GFS}W3+)4 z7c>$U$hUvRaA=A_7<0hYOJ6|COPoxLj15Ax_m8GhbX2q^8hwO}o+P&$OEKk_KhEA= zg0-@4_j=%*aN=3jCJy#V;vbm*{K@DCv@YE<8A%OSGlep)7u00Eimf#ZqXJMIrYi{t zX9@Vn6o)wcP4#fJ&K=#T$-bz%Gf*B+)(ROF6DPqyCjll3K0DQ`9n}x^4b|~>(ODMx z?rgDigy&G=#!-)L5^L?x@^5^0TI?6?h+Zd4K|EcFr4&Ed$*tZ3no28kuV%FYL%O)L zx2I(Q3*yLpR%HdCLX1PWsbqala0?XU`HTAn^*QoG-?K1H^O=UIH1P>~X{Uh`St}dV|#fM;=ufRCBIz9-YSDwO1bCG4wQe`>|Nlo`w zgrEGgq<+S3zW3tu3K2y)1z7mN;Fn^G+J#;V2UDN67Yp`u>|Mz$-(|-b#}eQyG5GIL znb0Gp0m6lR#S)dTQ7q=swhC6y2Gw4YXp9l`>zmi0QP0A;457u?L^x?FGDRGhD@3qc zfq#fJMBJ=DTtvZtp+0@BG(pv&;AYB1UylduDDuAY1K~e?6u62IIWHz)Zm0O<<(;%3kwjlMLvp>+-lA+n4t%3 
zW95+Sf&7hhY%v4W#uvGktx#qIN9qFIBn=Cp^AwgAeDOnsMJuBaHqAp)0@6(dq>X}!c>$}ls|6O-Z4QEdOLZB09`SJ2nSoMG3X`@I z-AHh@*wEQ3C`WZJW=+x0GJ(ue-25>6xK@2m8dVwlN@CO-#P5ll)k#N7CnHXR;ezet zoXSFkiXx(Rd1oh(G&k2*?hsS^O$*}q9?XZt+Dcc}LOT24LdSrIb-N@waEC|gNLeqXHR;NvRn8bdd!yGfvhBCQnVEN&+sR? z&)lwj5 zK~Fzy!Q^ir>6?)G$DRVR8TPoe40(1o1IO&z7xg~J1La-4cz`8;a`vIp`g&V&_uF&Q zRn}XqK601j*QV*ntg^a>_B;}!0DN{=A6&oKk4xK*03iYJg2lyWeYC7nKlSJ+5Ya>@ zO~4Iwv4-DUS?migsl~6d4YCAZ}Aoy-xN_uu`;ig=8IVrR%#SRJQdfwx8& z%t{#hcXv`MkE5V2)NJpjp=Qlo#mdYvo7R$G%bw1Vuit|yNCV~el8)mwM!*?gKLY(E zN6$mQT*hlrK>2Rj0{h$s_d?nTZ8MHw4|390c`lYG$y&p?7D}4f`_^apV?mGtZw!z9 zR*^J0#lm!t%F{MKIWy97u4001lJEAfQ1_9Ug#6M{omVGmdWX=Sp(3JX_!S}!a4f@? zt1#Q7^q|?1XwqHlg1W02y(D_a{v*QxqBZ>Ri^kBH_|3Sc!WF?`D4%+#T&K6tEI5+= z?k1)A56o^Iv-u8eNOc|HaF3suAf^RwZWmvm5?5Rq(b;e-O4j(_i(GQLOZ7ACeAeEP z3__q{YQJ7blDtNt0lb%RF!!x*UtT%njO{n`fF}z>#m+U!!BSr`Cz@#B2^f=NWW3$4<+C!EbQg-i1Fhm(Z@l4G(-ENYcTA|TmSe1$m3_I z1o>NaJYb?rg=`mdwePL15`h zjlu}lU)R&u&nB)C<3YNz!WP}D_Pd&Wn=$MMStC}*qtq+wll84yzXH( zsVOp5qN^e~#I~*4uiTjsy-`5DHG)pJCS1MpXMkbv>&R*+8Or@f_scokgCl0AvTO~T zEl(Fm4Rv}{JErsQ>i4KzXI#8Ybc>bB z_MoKCoV7dKunI_ip97Xo9hG{8NRcg34lCi zY#d-avNBUVN-K2sV?3Cz!xL6wSodoxj3;*8tLirN-oR##odTG1=NplI{uM#dW~+8@ z@7T;7BSPx`m5Qc7VyDc|SMZC_#yJ-m8qF+F63K{cJ#(Ny>U;fXio*^e+8e)L!Tw$e zL!RU^bfB5<)AeVTZ-zzqb?&Q5L16WJ77%1I0DRVr>HEM%P=i@AAP?ZNY8sP6kp}pk zmEE)5FsT;b*R6LHCU(8IcgMk)kO|aZzcE2qT9gk6&zCowtREixGGv;rK43nP|KRr- z#&ep@ET3rJdm&9Tk6z59>s!T&1^j3+15;n?`RQNkm{x3+aNeS^u3?l~FOg-@k)IJF z%0)5UJQt^+D1Il%54DBx@jWx2g?HlR8+WatIr2Y){Jd3K@6pwNwS59j-m zn)i_9k1qFc{}42xK$z)NnItlL*t_gsV(JZ=uEM7>-rHo2?R7{a!x>BK{5>lVPw_vg zmO#3*m!tSuC+zmu+bcVH594jkE4yZio1sM6C3AL>$kW4bfc2p39dL;zeOzTk-_)e0 z-P)JECv-p|A|)wsRE}PyupHP_EXJpfUQutz%PZvf23&&5VeRVZY@mwmPV(sD_maht zanxKQz?9W;8AxbaY~GDAM!9ReUpMc^WQTWU+w7-D%8VU>42aGBn*7_OCd+)kgHArI zxsypHu)2InpH_i|m`s+9G%cfAW4ZKM9d;r;2i?=tf>7i6SC1CVS6fGcYk=22hfE;0 zqYFH6Fk3L$TvtHbNgv&bHZUVZZNpBJ`LqGFsdpF4aq^#q0_iB^gXy=ViO?kWcMZB8 ztlfm=n5Pv9ErfuDANCf!i;ruPw0&CyLVn%V?n$d>n%aXey-d>Bk4C>tHZUWSWK>tV 
z{OT_SnLWz%vlh`fFJ%sTtqK+K&B{CYI6hjktn@x1QLZ&}iay7aO@i&nnGga=!G^pv zt#WTvHEHwr(@ecfvt_3gj~R4*aT^F$!CbbJD;D^k&oG$r?5eGTpm*p$N(^KY_j_^j_5q4rDG_@{p`ozfD3?+d@T zbZF4N!LS*Hls?2Du7wWq%>uXw-1O)@Jr_cv(c1O! z<=km8;xjR)33ffBxNKgF7zchjaaLE%SP!f>ITnjSpG#3?_IA-?r2#Q-3Hr(l1WDxF?)~|B81>LWF`JWb1ns4s3);5eA zL4yJ_lK{|>2AG%YGH|*bOAfWivdEg0k74@)eD_VC9Y&miq^$>bg~vuwTW^KZWV(?e zB#&O;i!fm5tXcxQcljLoMRf!43f?XXWtX^oDWvg?`x18ZX6TVB4uNcqmhWYpFzZ#x zPhF|*STv`y$9E+6PKyX&TK^JqCyzEp9FeRe%d_;gF%xFWs`U_jiJfNshQ9Q91v z_WG8_s_1QK$fHaoFMc1GM;WKemRF$)k8+{sYgW5|^6*GyT6U?ud3ke_Z3GBA=0vYl zI9kjIE5iid(xYZFQD=$wcvT)mdZ3>$fV8Wu&gB-BJ(A^=Ld)}?qg<`S5#{ZQ=wCy% z|A)G_3W}@S0tS;1Ac6#U2o{19+=<`>2=4Cg?ykYz6WrZt+%>o~?%sF)xd2j z#RbYf-%JJ8nB6@8+4?{R9&~ro0NElmur+Y}!d^93mhgW0Y47UOt+C93>?{i@xpi{LY_fzA>jgp9d;w-OK_lww~kA01&^}<@g|%XR_ZPF`-Bg z9XKUE;zFNzK>wFSyVpRK@q|Aw%yQ{={K{K4CPyOB)q)nI;T=#oj@}OJe2%BsB@wwm zSu-`2JO9aaJ;MnE3UKA+6v`j;s-GU}-gXRP8>u_8uH27$^-knVX6B_@wW}jWE zgnZ<-zi*BkL$#Ol;*xZ4;X*OqC5ttSWTZS%e{M<~sK1nHM3$f?{>ESbOFoh`Q%(F- zP_r4SQ9Wk10-E$|nH8%l$5pO~gMJ!5*W09UId1>!TITafS&xr*p8e}dPcrp+NcoGL z+VXs{4F~dMlh;v2?ywOK?DE{oeuBqLYEG8e`j-ve`}syYBfgV%I91Rsxx>K>U&3;& z8po0&*of6_h7X7mZkI6H#?Q8f6cjX;%x{6dW@)Bm)?bo%SQJPs%4hw?Vdl6}?uqZ9csJ>_;jWI22_fmWPEBlT?Mt{IegD@O>!w&ivS8pa#=L;Ebs5 zu#K03c0Mk8Ek-ARQQL+fMhBN!e;{hp5sw+OY}P=Op(|=A3aKkbLYQJHW+-@*1qn}> zbHq{6Ih~3Ao1%U|JJpt2$Iu*bkl)3`&othsbCeWrnqBdE3Xy?rOo_J2Rw1xpn5sYM6$iLCb-J? 
zxw+hOmD&auMlPU2`*M(UX#tY5I24zelDY~M;UPdjAm0U@jH4Vf-38A%v7dQfyU{94 ztBFfgLC-JprU$;C4;;lTMjwH|uLQcfj+n^=$iXl0Y@0g|n)(d-EtDlEI)@~J%;+wq zDxv3eS3z;f>j@?*Zv^WOI(osE{Rf}*d5CN9Emp{3{!zJs2K0s8#sfy@UE-$SWMhV+ z4E&*|d05MVYKl3T(-kA8ElVL5<>4nzq?j5XY+KgrZj^J3#6|gBo$+VV`&}_LygCnl zP%K$|95DFE6~oaKDQyYtl`z{^EKIfAyFpLgXus`euF3Wzm9u=Q)*-=XrB8kXt)~_` zQ?A@-{h^S!PBlE_!3n;5tD5(Mv?oIH_a3Q45o&Vf zY~@{E?&lmNDrd5T!ak-=2N6vX*mN*qn@|LpSGd7zDexCX{WzY!jve=Oz?D$q~gX=Gv$CW=L{1) z`GnL#O&{%-$(W(-BiZ_zEZ55m!Md$Ai+Y}>QjSg&Vxy%4B z+e6w8meaeF^}JdKj7d-a=lZP3Qr3KAf+h1xg=If7x%Wn6Te?CrG zPbiP#LIs=Al*F?^zSx2qA0NURGx;$ zRrImkoE>EI+4k<*254i@te^6qs#2ja?7cvOvRQf-!~QL1pucf}lBVWIvD@{;)~rIg z^68KXX0ZtYO%H`>pr&+#!0D;a<m@TW4OYihVfyO=H08^#+xyd`6@4- z%kgJ*doh;Aw^;lhr#txwDE}7hJykZPu@)7p%|az6u#z?Dx#Y_eoo0&|j3FwCg(o2n z7cUobPqIS5l#S$YdB{|A!iN;#rCq1U@u)E~(LJ$1S##o-CCK<^F2AW%kf~JjGxSxW zt%a0hwS_m^b|JN20J?zcdFJIcQi{c*${CD%5FGuYeU_rIh~2}Ge+x<1k;8TT)B-PV z#gI{@y+Ms>B=?!vg}&#mASULt!j(T~y`7IT9MBU5jEa7uj@x@~V|;PjW-_Xa=+7CQ zD{Mj&NC9AUBHUMPD49Qil7JwsX8n)GDrfV&4Ik>Y3=PHUjB$uwF;6pj6BDaPqTsS2 zRVDvK^qXQ*))z+DG{>Qy%?ec=i$fhm8~bR#EqV#7?w3L1ca-=)Ldx76->4{>6DhK`o3oh@5g^|F>~GI9 zU_*Kn2qu2OyGKDBJ_!I$a7iT4m=5;~p{{+F?nm+x_!bQ@?GJCYY)GQQeimwu*y&Sk z-DDC_FXPB;5Wr~pDdqS;4}7~@#~ZU9s{RgRskQm1^<#cRmydZp>2%=tTsLzRLBw1n z>6|e#zXoJ*l6A%|J8csvRMY!Ulx0sVIfGc=!FPiNLatK#X=*NVvCCim-i^~tS;Q5X z66J4{-^>KfUheYqze9c_qLn)S-bP=|j9gCfn>O!RvQXqwr^(tF7e1>K+v_NitHhrX zH|o*e!tL7@i3Tdn`qWDsF|(y}pMn6@!n_y_DIF^?`a? zO8V)qAaEMddb@IwnFKTWb25&b4hHSAMyYUeSkEQWW5R(JYr8_ql8n~2Y|Zyp4a&5x zE$hrV7`1s#U;x(hsi6&tnCXw+E7ptHt?wD;c75x21FT6WW#3mxzfu|Z_Hw>i$STZc zaB~uM$jjAZ6E@E*i1Y{?Kg!8H;phyupe67%lyz?2M``n$^n0soP8P>-VlLAN z4NeRf0oOY`khZX}bMgefgoekJXNCc=T0v=Jd<|=`wal06d>vN)509EyIn!kRVf3PG`c7k*xM*m#K#RG4PyyR}N+xXE4Ki(xI?FNwL{ zCwAj{_b%Mw#Q-h&Wr4hGo#OvDdD#$@wdG_JS4SJkYRFSe&q1v5!Ll*5?#}?N2Q!b@ ztPxdV&CJ9^*K@w+(wPqY)G$Y`ch4o>&Cm1MpJ@? 
zpCpQkoL7HMKa=J$zE+wygy6#e0{`{K9@Ek;ptqpRs# z9O%%Sd5k#sUE1jdC1&O6T0$JE80S8tQ1fu#dY8{tEiGT41`)1I{=d}#jq&F${~y)> zU724UO5JVr5i9lUiwOM!eL%)K?7|9 z?Dj%c3Ew<>rh{5tA@%97$TTr**}75RnU`7J63;SnvkGRI6=RTGTxHjWZcn{@C`|@<)WJyJh67M#faDN%zyGPOSh1yMKH*IB_Pv@5hWN#X3-52*arKX<$?T}2$8(h$(Bb6L z@NlH**;zBdO;EkqH#>tBr%DRMA`{FUJk~qyoXSi=N&2){np0s2UtiTPLVp&lT9|=6 ziKk~4X0jDd>vW4}vX9C&r(KmweqZu?UY3Y#z1w_;3`KkK@`XS7Yo+6n+P+~(EuTMPr`~40Nhn?D*GjE2?w^>$_gNR+DBKdEV)_;>R!1fG=llc9Bu*-HfuGtwF z)71qzkIrZ<_g&nWe8*3MK_#^gbk<%AXBpK#S^d&-y)F-oq;-a+*ZV;oxriCD z$VL33J<_}TlIT>tw9$+gbN!TWwgTCA;|UKw@;;pIiWtOHjr;>y- zl1CEOM;hHVCY1Svizbs)_UE{huAHQ6&)Auq;{|ka4mwxX?DPYKclC3({@}xNZDr3_ zm&p~L0(D(@4LhMG2*+7^Nw|5h^JJXb#JjA$UkB*bC`$60Tb7a{3A(7=a#%7_5Q2F}l{;?O#4{5K38dzUX-!RDA5^;_T@hrd5OGuA#lCYm!Nnoe}yzP7)y*c;=t#vFfxT?a!>!deH zarM0VZY}{?d9K6d#h8lE4Lt#!0D|JGM_?Q>3Qkfjm#!}EX#3Tmy>=3cH764h3R8L# z3QLq-?c_lQ>LyNJ?5-%z66hCSboZI7jbNXzi7vO(*tdjajMr4L^< zGF5x<084}4@YngJPHIOW*25`xSeRI+Y!!Cc3kB9>*3J5%p`k50rZ{l)viNc+6sIq1 z!<4-Qtfs(u2s>f+$@awfK(7@A1s@jv*IjE!n=ve84UdjGoZr6(u|%y%?X-HhoVZNl zEi!PX0emvK*M4?Wu+ZeQOYHG=VQkPMHRO1i>YAtvZBbLL!=jh3+TR<9FdY110urL=LTfOc= z%3#`L`h<*Fr!(b&*unY&=GoQGBsuJQj?hYgaJHf)6G+ZaopXPYn7A4&f#g|V45HS3 zKdZB{V((+o7T|;Rs?h*`W@pv2#_`eHVy(QWo)HDDIgh4drRM&!?$b;wBxR=6W4v{- zte&W7iG3P|vM(w|#z4kjbAO?R8`18h3eH<@bz33gH6tOhHaotBL0zR6>WI!D1A#hn z)pm#(F*A+wHd2L|X|e)Vh6gca6Ye($Z}!+rSPDV0^Pt>@o+J7J8=Jlxsar>fsMvhB zw88QG>9;XJAFvkaaWn7bMeH0qpwMDlTH4uCQd5@WkO|n&mSH3(-58M@w%h~#%KqS^ z4?J#g=R1t6xrq;7WOP0O)F#SUf$WTVxdxA#6w+z7#Px(>Z`d~Ce$wTX%WoBxx5q4( zO3l;FsWEd>#~v!yQXpgkLHsQbmsG`Xmj7U5Z(i$g5_0NZ5y>AB!|mkYrNsh6yXOph z8B!0K30@Hz+p=yzSion9Z$f3pImz^F1V!|Odsl-)JHkUd(6ETE1wLSl=!5!X#*={8 z=&QSF1!XpcYqO$?qT*A`qi!^!1%akJg)JX;WVo(^CB&%&VmL2RJmD^ZqAM@LB)b(> zEd{*MUH_=7Ni~^3TJ~SZ&rJLTis<)I_`!8p*M5stW|7}#iL86-Uc{EPldK#1!zt+- zLud_QytpVLC(klT0C_%h{WU>w_icfIw*d0?BKzun+fmn*iq8%`!JVdg+s`AYuF3;b zc*2WG*7h|db>RUtx(nRV)#uZzJ_?gEN_5DWKVFPJ z5ApeuwfF$pboqG;|uAj(Mm`b zzt0OT?P;X4{5k~)g|`P{M5hghJ_gSZ%(uifeMs0>h4cj}$SAEZ-`vzr&O4EPKFJ~o 
z#N#~Kvi7gZOqj*^$UjAFel<%RT(>X$pnU@e?@)Z@ogfgUMl8Tj8znc_vEF2X5&ceD z$izexW&(5Xfb;HvgVlMpsb9gPq``+>C#(P}m&{v7Wlj(N)%!Ko`S~LOJ&*+*jI-|1Vz6eH$WAw^))DLE-r9cEi^Dt6SLF*kEAFxbPFwsnBVhIFz48fa z+Ov1O?^jgA+h40WTJRFp!fPT1c0C(z4G z^aQX+wqRMNRoYy(vcS8Jue!-;^zHMZxQm(ofi6I#TdSA}U;nz7g4iXlg}=2(m7-g3 z3ane*a7tVY4nf84-^V#O^FlTEqVuW{-9DdAAKpU6o_|5*dPG|Q=11lH>c&}B)^0Y% zBLlbJ3T02zbn{iW7SP6ZfXOs3yXtUjkTT-y33*Qj?9C;X*FF+lw#lW`QbCd(JW$Qa zpLLCEuWVMe*uSySSy9sjPRw5ARCrMF&JfdJPKR&DOfeOd$M#>SvS%Kg9=Cz zX8qjsJ{+g9$(s+pxm&_B#A|NRKRk;eG_kf#2@{A6$?euV9YI00JL9|l=HXjJIL&Ni zzTQn6cyc)@;GY9B(NeUAj;$x%*&9FRVVEEbf7Ok#nBq}mvpzIkfjI1x%slv*-2{h8 zm|w%&KMJFssjD?WTivdYn*-zpx^a-tHZvYar3#_&u)=K;N*Ro2$mDY=L?w|-NOwb!b@$7vT-@LzTqo5 z=8c#(**N7QizsjzE8|rkAUaW+)ph~YkY7MgifKuT=^b0V^7OQFhH1&Z_#NA{GFyDd zPEiHu7kyH{&%{Jig`LqCHK%W?$?KkXvWJnC^e|^0v=<^xDA~y_PoTMc&GViMn)`Tc&O6; zqymGxtRERSuJt|H-ZDE3A39spO&h6t_~!)I45thH-13=4;eG41Na?4{2fA!E76ap_ z9BR8$QEBK#T2n;mtOBifl9POCN%5>yK0Pc#KWgph>8f*`L-AV_jHA?ao@v2*3I2U8 zph}+-^Y*xXRB3(YT~J2#4(#^c1m_{NUrNK;YWO!96^4@(cE;}|odXfMMEiSJH@1N3 zUmm4#f~blm0}*dDK3o}B>a01Artv1w54|a1C^xZjEB90wAa9e@{$hBbUZqSS9}(ue zxe<9^dJF{yuGQ&C3#x9fheVeQpH>z(tl>mVVZ?+4hh%BrFSG9_*94YTA{ga~SPQ$p zY^I^#-gcYYwtPsG?pmYB6VcLjy&^A?kS;o7mrKu-m`?Ek_vGa0TQ0s^7(xyW{_Cm< z+;16?-?7L$X@1mgAPYWXi<;Af+vVJIXF^)!GPB;%myNDQ@Nx*I4ft}&xaTTsM3v1G zc*PmUGB}o;t$U)%zOwC>thJh3AY4yk24Oa`)L+bnQ#si#!mh6^p3@!YMit;84}FBO z^Bz=+auVXei$!1Qca8 zEmQhdHwnMn@DJbt@m96?t{ZxevyzjG5i&>k>uiMVu>~&?D6Wrc2Dok4Uv4irB_Gx7 z8fQ_|SbhDQU^(&gnyAM=i*2mbk-b?e;VMB0F|7OYO`5lArdOD&%MF2{Y2RsVIsES~ z!_3Sdnn&LHU$rIx^B%SPBEGtICx72PT!&z+zo06^aaOLFlrR7THIvC00MiqrAJL8Iw&Iq0I0~iX7tS(N1^g0y-NUN+lVUpJ0sJz=eSRRh}BOV zD+lK?2}X;JoKdM!ai>od^8=k4{L4dyx)Lhq!r!=m{}#i20*?xPvYK)0KuM;F#_vx0 z5o=pB>>rv*w@!Q&SEA3cpZ@ynchkj_*l<5y8wut@f7980suQT``8Kri%N2?;ILpwb zWV1@2q^g?=)=g~=^s*|_`5ZHFEsryM5vglYnd!pAH_Z6Mpb9jQ)*8Y>6-jP&bQe_v z9mK)9JI?>adoE)jx`6ST1B*gPRjmG3p&*rT<^Dle+w;<)=K=y;5wwqa=|h#~pMMV| zYrrMyYiR6l!P5Ej4A~%bR7T(FPQUfWa2^W&Zo29Pd16rx;Y2+S)G$F+|EA97uAD|i 
z{Jw7~<~0`k#=zM@Af$+tlA$YcSiWzLC@m8aLcS(XsPrQ5u-L4}xW%+gOK~+Re9(?a$6hxCL2vynREA*grQGHdX!D=Ph8hr)@5)gWx6DiqW zm+fICJqZ?hPYMfodkhZFW{az}A7GMd?_tr)p9?8bc*zObn>jTP<`hvfmZ zNkZnY=IFUGIGnfDPtq^#u+D1!tj3QbCckW+VRe>K8!LJIrP$$HJc(zW=4vAOuG+kiA20RefA z1u?Do*-{*&G-o!+-ZpWozTwGY+suCRbP18f9a-(D;gI0EUb%Nv9o1I2(I2|%t;K5?Og@X`vLf_fi2J3FVkR@_gR^wMXuQn&HOss8 zSUj48_P~5OJxNbGtTp;mTGYD**)IWox<<7<^*27_Ne0VN>|Rr9EOsnAOCHRPL+8H& zYF8z1_|LAcxt!c7CUMNP!)mmiAQ@MQ?PK1FCns_BI=_i-;Sq5=u;wOR-mJNJB_(NB z**vH_ZpoOrHM7dozjdJpLm_Q_e6f8q@3f0>WyR(w;kUQjVPoln5$2cuVg zAlSts`RLrr`6*+>0Nd^@@9+rJcvJsR(H;Sw)86|8_m;9A3rSc}Od>>F{)_Aa8=$y4 zpUh2_qZGsRmI!;3jlC9?R8U6|ebcyoL2e)J_x<(t3kEc2x>TWN_d5p_c*=}kX%X9A z%69_V6gF@oXe(Zv3SirUxt0VAlD5G592X8^DERg_<=>J{HnKsz>_?@C-Gf(c{lW1) z0Qv-DOG{-CIlkHUQI zcihE;frZmigat3M*ekgsbapm7O?o(eN>4eVEt?WHIjJGYg_Ll3dfJ&H?x@JF=^wr~ zliKh+JD7mUH@F*fkxQjvwp@Ew38Jgk^rhx+(-K^RqGdwbBeDIXMIS{2hLTmlUDKCG zmzh~mQ)ZuL|D4bBhUZD)wK`G*rfIbtW#n|AgiJt1$Wv28Y2;d-3g(%Llf0e)SDPGQbTUZ4-clC znfDG4X!SO|uc&y!=xv<>t_&yoVj3R0#PHdogQZNmukhz~d81p!199jK1v|*N<&O+4 z@}uc?c~dlOG@gs1JRFvr!mx)6j2Ba9s$i+8h4YQ%?pA*bkDy>q^4&3Wkt6ZV`{2B( z!{J=}t*=OvV&-k6-N^Xg62r>!xV;Dr4iaTrE#Wgdmq=YodzwAymXF96UQ;S+_h5dr6l4k?r6tz18W#VPV}3 z?`+pjBe5%0(sxmZ(}^5TTXFircj|jQ|@kE2u8V ze5k18gz?R=F_Sa{MYoSBn$sb_wP>|;q?5};C)wtP4W*a<&;(trUR zf;zq=l|{ptxbJHrU#^C5)zPN!Y!S<&ZTTw-bjh$R0E&L zqOkl+bsN){ZLVj#Bt$N*O|jJzFK@V^%SY8YgKd@N};#|HQ0B5(V52s%N4#z_6Au(^yGqM%8!y@joY z>IB(J3q8>VsR-7jsh%DTs^04CPTk%J{MMD}rtA+O1y6Ry0li%=4`miJxv+&nuvw*R zb)tmy&-K`w=^)L|IamensVBbjiR7W79qAqnrqi45Y@cU4n<)~cdrrP0=SETbYFoWS z^3KT4uxMnjkUcE)d+WSewP)CK&UU;2T4M7dGS5H8=z$Sbc}4MUR$3gyYU5vVXWvkrjpvYY&f- z6xjDhCv%79bN}pRe*q0StX{cOfCrx{#mQBtxYXLN7r{Jqr>8sjhulv0>wx&5W$EvKtSlNXEtr<+zFWj+<;v!`&#@MtKVt!Id|ye^&Rt`th;# zye!*&a4Ejh`d2a{q!kt5bBn#Q>@QIPmkZEW-N?ieiDBj0rfw((W>1C#Z+Rl7%d&X^ zmBq<9xv@(k^JT1<23doJFb?s^byLPgzSZo#r<>6H50E>d0I|o7dYeZTw zZ6Tb2io_h-xgO1dh8^OE5OV0hLwdSDI8VBT(x0g1PDFv_9_6F zhDwNti|&7UFc#wB(te4W3M64o(libeX5)fNPT-oIS<9}TN* 
z4KdtkvtGC+JCDA>IVjF=oJee%!~6vN206|Smd$oosdZ$|MC;^v3g=n^2Z_`oS|)GY z@OZOu#J*t^ek|$6vDxg4%J)zhh@;hQJPt|FIFinx1iVA8@A-GOoW(}wIeDGn^*T## zftZ~~yX$Q7Nj)UbMZOrjrlx+Ht8)uYWV7)0c@8upZnR0W_n-I>pZ#d$3)&w^Jx#!z zHe36Xs46KQvif@tN{xOff@5DH@Hg z!pha%_+Y(uW%sRI=b_M3r6$y7jh!ovaJ&rBamU?wc}dM33d2oB7BjO;o!VH?h=@+| zh)OnjGKlw1=>Wqd9FX9kUpob?X=o^Qr3((-7TwUpuswTQT`$DsX_Pm5pzdB^;;-0i zg|v8iG2gCrSd}sH0~4%yAw>Jn|VHy%4m_6lK+9h zNI~I8t)5)}Lh%d!LO1zPJS@pO@9}PAf-m|zmT^Y(zgX?iuu^B(n@2pU@>lCNDyxXP z6);&U)i9kRe;QrYnHpM@>hxGjS1-714ZX=|W|M&zGhqRADQhh4LWh4Q^47Ts4&jXL zIFXzf9&>1%j-DtUcMPaF0X;QAN<3v?{c9?4+z(Ns)y&{I6*j$|`}iAX41`mU38uO} zd-(U%oRRs)I2oN9hu0?tEtGDNn@1u4UHvPSX@$5lRnfho0Nm(L6i9?Z`78{1;4v#? zOe!U#AC)dkU%+$M_l{DJN5*vqn3XI7fsvuB*<_3xG3x5T&Wankj_0qQyRTvCK?Q7v z=Ijl7gP8o9gwE;;ib6NU8kEwiPj1GD{ne(8JM^mP+o(|!8L<3NlrG^(on2$3pxE_8 zHq26$-n_`=ZY_-wc&et|By%Qcb1x%opUHew3i&Q6aKysIwd=v6xVJWE4^ z96OCxsW05U&K3^0Fz_NTd~q;g3ZsH4IM^0F{ar`KBKnm%gO0T2r)CbEu8zJx!GzI{ z3Q@d}yYq(K-(gHh*M>9Z{C6i^b;mKUo^^RH25F>9d|c3+**y>OK!{?t$E5z&7xkwY zwda6^dSI07Al|58*===8!^=bQB?suobJj&_r0{w!s%c$akyQ8q1c(L4oTAGvjPIXB zYH)D>_^*L={rl)xwVBxwExA2|1K+QXhy7+Xparwr^eAmx@sw1a2{PrnzM(^2MhWOG z{xEF8m?}GV_H;PkXDtrxlay=|)kL8Y`cfcrmR_V_sAF6uRXwM24&AS+&68sl>WL&r zxv1|_Vr!wa3_IJYmK{6ir;#|BROIA-N8tK;huKZe_;-3yxMookY#C_Tu__l3wu@cN zkMFr-<$`~iFSsTIp=Y1~7abv-?DB9ZcsFj@*yc=|JyFG+p?K{G znJxTibJI5iZIO#Eh3kfRd(&mWL2X$5c;hDZ_ufLsH_cx^^C$X755GDZ;>^^>F0y6B z9fw>H$BrgRAUtPxvy+&wEer;!g;JaAu155C)TH|AKYHG)G&>RjW5Ax3C|8F1_#v== z6pg@Viy^RVK^r>zWY^1awj`!$<3(Ijx6%no;h9@1bwP zo?l;7j}=%q-;7v!ebdn)CE~V!PisccDCzOqJJqT&8R$2(dg4NJJJNr%y^?NR(kzC2}3c0?CZNUOuIj=o13s}`(KICV%SDbRd~_*)!OA95~)az z)5feh|Dw46w7<-zz>drUx z{M^ESD~4XV`Rk(rLDO0Q#k#sL6gqS%J_j`WcKdh3ZHI-1t|p}B|4>m8gDiu%&P!Mt zk7mHnoNtr!fxU7;cXi(UbXOXV6?A=FgUkg5{uY)eh$4))Je>WDu}Ctf z&W?wqt;WwmVf?*CZ%T@DJ2Jc0uI5{K7YvOYv{^R~)Y$N4|Eqz3gT$Q4{MNWaLIFXV z6=*K)USgz6GF{i}TZ?rEYJ(fP19shOmFn6}73&&No89c>6+{e7JUXeP?GO~4guRy{ zFO~Vj#TgAvX$*@{?b1;5_xfsh=QL64X2VPZujbmus;r6OEnMl}Dg}ct2oO+sYA%y^ 
z_(v$rmX4$9<_VAT-p}JUNAu)wAN7|L2yqz><2E@S4cd(fzau&~DIskK1#0_*@o5tebpK|D|NCECdej`4UWxPThHtzL+9kS~n*4$?k>o znkpo5>bgP<8rs*o9fh=;$STqzek6Xh<|fn*s*?%AyHyRH+#W2V#7Q$b;2X45IrnNq&%pQ=jbnms1@Qe<3|@ZLk7u6Q;!HYIBH39m6e zyu%{;IkZK_2Qph?PM@6E&y!B$3~@M^wE>3VSp!|qtL^t>HB)ZUy8@BBn7;B-{P-_7 z3JPv=PFV_zV&mYPFE>NZrY5Cz!AyF$-_On*3GI{r4+>RAT@w*6VfS}8(x~HH^zp!8 z%PQw_U3CT=a$zK+{VAM~(1`VElgWy2fPeLWd#L83-494hbJj0XBY;oZ}3 zE{2*^iMs#VL_nqWSMD_6l2ZRKw#h8MkyHHZ5I?Pm!W{5g|M!ccD`dX>kHiB1(%Cyd zLlpH#LK@N;>3OFYh=ex`fjoR$vd^C!aA<2x2*IE_U2A)cWNf|O_cj>gdE{ki+9gAR zYfWZLed@bTTiy~QC(To9nbF<-a(gTlXtZ{7Mw-YsXa5>h7Yx$DU9|4H3@2v6pJYnn z@ZyKG+*`(4i0=k)A3sylys;bQJ)lGCdcP4w;(R$hn*u6XU*=vQJ067A;Y2h%Zgb4q zoKffhkkc4ZvjRy93kz2#DlDprsC8@HEMKXjL-P_Wqz>aC7RB`IW$spPYr$@ElLvCG z^-qtsZ9NJgKUUK6V57>kMbF#QUT$f0_*5#*wNm%@6ZRWO=<+V9SHOyD-S@@=PFNI? zpF%=Hz9%yWh7-C0hgPR0aoom)7 z69V~F+8s`D@$s}*rSA!C>gfao_@RA^&RzO|u5aAQ)LZQgDAB5ah#I2V0YL5nK(6aV zgGv<^BH^XGS>q`^5L4^jXswDe;Qovmc zG*1Y{ifAAy8Bqm3%_>$Zjn)20b3B z9^-@i{9gydnU@%tZ`srS1&*L*vf2psx1R1?gN=&Y32K)Ron#@hi3C!(9`BP$m9R972dJXG%ZuS?Mw$5rLLI-#7#*tysraaf( zN(poDz8OXy#YB6NX2b^K>!&$J3``6&r~6!-Oxz=c$=1%6rTfnorRpb`oyL!yWURuQ4_ytuA;R&Cm6gODOYs{z?+-%efLbqx?H`WuR^DvWLs* za3Y=r3@OepTwa*Qr43KCO6c|gT6xl`U-OOeJrZ&(u_jN%paY#o;~^qGGf#x};F@UQ z=;>8#C4pC^1vveRa>@qr9@(qLxO9$o2b}n$n~#*#2hg&SMwu?yZ7bzz0&i**7vmvE z5+B!Fy}kn#)&IPU!U%`69fZ)SWZ<{SlwNHX`|tq zQ(QvgfLP0V4SqO&yfOIAxRszLUsl%MCU)am4Fw zl5%&uuT^cfV|jM|HVz=FbswPW_}QqAB~54?0r(gfpLCiueTbVJ@aG`wK25F_en7Yb zbxvRccNs3{+khhVMVO2~KT>Ph0PH~KlVy04+sD6PbwC~1@Si={nm5W=>j)x#Pu3~4 zz5jiCBwYlUuh(!{ruCdTE1Cwi30ffWAGSE3G8xf5H~6c23u)aGn8?4kFNqN*HkR zfmNyhy()aDSnLOzji0mCt_~l7BLTQs9(UvK|AKA=fG$Oqq{i!M(f`3sOaq4y44VC| zL9aaRKOo6{$msNcO*K)f#xctC_~IEb++!ddkL7^%Pk7 z{{%7`|9hdc1Ie0yLoVRM|G&OuL`EZukXfI}>k$^KqHnVwg# zNj&aIpWaFu4p`6HZZ8>KHhy}YjwL=0s2HWIaCpg0G6IJ@zPnvw<$-PiIdwrq3EIfI zx8WpIcUxj>Iqhw`zKItwbIdGnK33I)&6)+dw@F@Be}dJ!6hNyU?<7HqdHmpSCZ!I) zOSQBP^o;>=ifnth^!heKY)mx8J@-%-_UnlS@+95xycsl_PKK+cNoLMqjCOmuI2gb+ 
z!A~4|mVI=-VePL+Kt$8`mH!`d)O;{fbIJmJLV^R=rLtMsiZe$FT)hm#HkgtmV0Y)# z>kHosB>366?X+(fG}5)9k7E6blHC6Ocfu0B$wyQ8nrUzZKc0%Pb` zVrFvR)-T%9VqwLP>;oNw?>TScE>+%SaxC#@5o)4ZV!cx+!kkr?I$)AZD)aBFMLg)>!P|`UgCy9C3&0Fk~D>cYFEUcdSowN~$*@ta! zzfj4`c_0rvR9b&FIK*hTu{q#y&N1UDcc1x5PuOqPv_!Iec5>6r#Qebzl-SD;zQ2D` z!e>K_T~!u}W$HaoQCg!EEA*LpB4#WD5eomr%XS2IbEu5W(nW`KX>4h;lvIX0Ih7P{ zWCIPN~ zxX4Fre7GgR1Naf}x%qM_DRZ18jg}T1fHCny@HlBA-@g-nkEMk#77fLt;b3b-_j;7h0K${|N^TH$&H}C)E9$QwEN#9L) zFV28qT&3#mr7F5i$g=A6uAN?Q_!>^zH5c@5t<{hCLRwNXw^wci@b-c5d2@O;?~~h= z`Lo}@vl|b}nyDEX^QANSpLnFV)bha?F)v|kUq(C&_H~|r7$H`ikN}aijAA6n`UpmVur(g9bcPy>f>DvtV zbultBa)Rz(t=6S}`F!WhWpfRNK(+CzT7tmRbyJqZ4{*fT?Bki-`#3Ho zojY=PFjxipgf&&5RGEKm{ZFYXn>)hvcUg!Sj{`>yp`uBQW*Y*8f-{0r^zOL{0!)q=s=$&E3Z`qrM z%!2GT2YQ>oEMu@QDokXmv~BpA6PdcEX$RQ(HoxmT>b*~&&KD8JgRHG>sPeL4BNv=m zfUq@7YrWx~JT-thZ6kBdopeI#)fT+eS&^{6vC(27r+>uLfAs54Q~q7&r2^5UjPqa1 z1QVZ?qmkp=H?j+U$DK6lN*JC$AApYUu1{z>Wc7k^P zT0NsaV9j>%IY%RgOZg&wrN}SZhtDZ4W}wy5u#jZ=xq9q~Uve^nrr;E!l6lNzPyhp- zeyk|%LsX*SR6t0eCy4j``}c`{{=Ty-1oi}O>Yx@dy0f3`B9?kFi&k1^Dl4rufCtic zWtzK3=7vsQ+`^LJDVZEC-dDk}ISqQ77MgR65jLB(~nmc3=8sfLVXG{ANPxX_{UB4O7RRHa4rr zvS74{h*K!PL`f$7jFsd)8}QA88^z}fV^j&E>GGm3UK~zP)$k0{b5ba$?*qlzU085C zj37S>UK_wn%q`C9mw0A}56y>xr8ASBgj6fV&GQ}Nz5<8bnVA?6Q?A*D&uAxzza&ru=0wGWyZ%x7U9 z2*nJUI>%MaCn1qiWxQ{5QN}1?j2C7~h#5`2aqL$Nh*pAy&9|ve`Mq|}*QA4~e>cFP zlMeF2T{au*>t~9@d?|QeRsQ(#y7W03y8DM53l&aho{U##ZX_rSi$q*jzTqB29aEVK`Mb}>@k!)YN zh;`QLd}QQbX!Zuq7Ep@*`1Q70Tl11f#=0IVrW*+9coTewL>S*Kj~OX*!GBZ2j52wf zs4uC?1;qQXQC8lbD1+3c#jwH3!9^h_YYt*JWvWi9X^#kSvnJKZ5(V#O)0wph9lFaS zo-sMR{Mss$2N!z+p@DPnoQ;RQ(hey8p68d3U$M`VE6w-I=p+v{TdWJdukRV|x(hP0 zxEK}?7}#}()@ZN}Wex)?2MB_i8iz&U;Kp?DQ-9@?Cm^beGpsAXeB#Z}#fR|dcaGmL z<~2=pENj0o!yOm!PKA7hK;X(0-8pntD;CcYXNKWe{xC~N>q=pttgGz8Xni4QHaFP3 zIU{U~4%9Hx%E>9WX0l7)#DoYcpJ^r`94LpW*dDMUlnM_FJE{e%ii?v!1XZhN zKZ9KstK=~WVRPJrWo&q9k3?@!eWi}|79Vwfxs2QUaDQ+*XlcN`@!t4Dw%#TRQ`JLF zDA{EkZ1q5pS?+_FeAVFrhH?IMigsSxX??dRfI6$pprz!#5$H(U}NvEK*7zm2#h=MNYh1PZnc4X+; 
z_WVZKnr3ch$xh&@*+B{-CrPN~z%h?8?lOP2vZ7+Vv;wx!ZIK=7x^>g0P4j)fGBxbO zI@ZLt%o+k;3FZ|&Bb%Kpi_pX=w01R_p#3~>Y_p79E(2Dd32`@j4!3B_kjVz*jgG0 z&jx2N{E(d{=3G_hWJQ>eot&WhUQFDZ1jYm75QUiPAo}9dh@XZLRY7w$n_3H1}NqpuaV89zU;N=b;(X>xDv1{1b*K z?LF~h_dqBB9?FSIG>&O1j1p7>f}KVo8vc=U5s8#(%izO@hF|76oNeTD-R%|mEG!3f zLbI~#2mYh@8HU8ai7zi)pnYyTGn?y2?veUf~&_8@WX?$trozgre~ MAND&`eK7R$Z-lDX?*IS* diff --git a/dev/limitador/limitador-server/kubernetes/kuard-podmonitor.yaml b/dev/limitador/limitador-server/kubernetes/kuard-podmonitor.yaml deleted file mode 100644 index 631edef1..00000000 --- a/dev/limitador/limitador-server/kubernetes/kuard-podmonitor.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: PodMonitor -metadata: - name: kuard - labels: - app: kuard -spec: - podMetricsEndpoints: - - interval: 10s - path: /stats/prometheus - port: envoy-metrics - scheme: http - selector: - matchLabels: - app: kuard diff --git a/dev/limitador/limitador-server/kubernetes/kuard-service.yaml b/dev/limitador/limitador-server/kubernetes/kuard-service.yaml deleted file mode 100644 index c5335050..00000000 --- a/dev/limitador/limitador-server/kubernetes/kuard-service.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: kuard - labels: - app: kuard - annotations: - service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" -spec: - type: LoadBalancer - ports: - - name: envoy-http - port: 80 - targetPort: envoy-http - selector: - app: kuard \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/limitador-config-configmap.yaml b/dev/limitador/limitador-server/kubernetes/limitador-config-configmap.yaml deleted file mode 100644 index a14318c2..00000000 --- a/dev/limitador/limitador-server/kubernetes/limitador-config-configmap.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: limitador-config - labels: - app: limitador -data: - limits.yaml: | - --- - - - namespace: kuard - max_value: 1000 
- seconds: 1 - conditions: [] - variables: - - per_hostname_per_second_burst \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/limitador-deployment.yaml b/dev/limitador/limitador-server/kubernetes/limitador-deployment.yaml deleted file mode 100644 index 0b236b66..00000000 --- a/dev/limitador/limitador-server/kubernetes/limitador-deployment.yaml +++ /dev/null @@ -1,62 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: limitador - labels: - app: limitador -spec: - replicas: 2 - selector: - matchLabels: - app: limitador - template: - metadata: - labels: - app: limitador - spec: - containers: - - name: limitador - image: quay.io/kuadrant/limitador:latest - imagePullPolicy: IfNotPresent - env: - - name: RUST_LOG - value: info - - name: REDIS_URL - value: "redis://redis:6379" - - name: LIMITS_FILE - value: /home/limitador/limits.yaml - ports: - - name: http - containerPort: 8080 - protocol: TCP - - name: grpc - containerPort: 8081 - protocol: TCP - livenessProbe: - httpGet: - path: /status - port: http - scheme: HTTP - initialDelaySeconds: 5 - timeoutSeconds: 2 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /status - port: http - scheme: HTTP - initialDelaySeconds: 5 - timeoutSeconds: 5 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - volumeMounts: - - mountPath: /home/limitador/limits.yaml - name: runtime-config - subPath: limits.yaml - volumes: - - name: runtime-config - configMap: - name: limitador-config diff --git a/dev/limitador/limitador-server/kubernetes/limitador-grafanadashboard.json b/dev/limitador/limitador-server/kubernetes/limitador-grafanadashboard.json deleted file mode 100644 index f0f3b9f1..00000000 --- a/dev/limitador/limitador-server/kubernetes/limitador-grafanadashboard.json +++ /dev/null @@ -1,2154 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - 
"iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": 28, - "iteration": 1604592740368, - "links": [], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 67, - "panels": [], - "title": "Kuard Envoyproxy Sidecar metrics", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 1 - }, - "id": 39, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(envoy_cluster_ratelimit_ok{namespace=\"$namespace\", envoy_cluster_name=\"$limitador_namespace\"}[1m]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "ok", - "refId": "A" - }, - { - "expr": "sum(rate(envoy_cluster_ratelimit_over_limit{namespace=\"$namespace\", envoy_cluster_name=\"$limitador_namespace\"}[1m]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "overlimit", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Requests per second (global)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "reqps", - "label": null, - "logBase": 1, - "max": null, - "min": null, 
- "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 10 - }, - "id": 65, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(envoy_cluster_ratelimit_ok{namespace=\"$namespace\", envoy_cluster_name=\"$limitador_namespace\"}[1m])) by (pod)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ pod }}-ok", - "refId": "A" - }, - { - "expr": "sum(rate(envoy_cluster_ratelimit_over_limit{namespace=\"$namespace\", envoy_cluster_name=\"$limitador_namespace\"}[1m])) by (pod)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ pod }}-over-limit", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Requests per second (per pod)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "reqps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null 
- } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 19 - }, - "id": 20, - "panels": [], - "title": "Limitador metrics", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 20 - }, - "id": 68, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(authorized_calls{namespace=\"$namespace\", limitador_namespace=\"$limitador_namespace\"}[1m]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "authorized", - "refId": "A" - }, - { - "expr": "sum(rate(limited_calls{namespace=\"$namespace\", limitador_namespace=\"$limitador_namespace\"}[1m]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "limited", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Requests per second (global)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "reqps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - 
"dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 24, - "x": 0, - "y": 29 - }, - "id": 69, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(authorized_calls{namespace=\"$namespace\", limitador_namespace=\"$limitador_namespace\"}[1m])) by (pod)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ pod }}-authorized", - "refId": "A" - }, - { - "expr": "sum(rate(limited_calls{namespace=\"$namespace\", limitador_namespace=\"$limitador_namespace\"}[1m])) by (pod)", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{ pod }}-limited", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Requests per second (per pod)", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "reqps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 38 - }, - "id": 13, - "panels": [], - "repeat": "deployment", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "title": "Pods 
($deployment)", - "type": "row" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorValue": false, - "colors": [ - "#F2495C", - "rgba(237, 129, 40, 0.89)", - "#299c46" - ], - "datasource": "$datasource", - "decimals": 0, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 0, - "y": 39 - }, - "hideTimeOverride": true, - "id": 30, - "interval": "", - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "pluginVersion": "6.2.4", - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(kube_deployment_status_replicas_available{namespace='$namespace',deployment='$deployment'}) or kube_statefulset_status_replicas {namespace='$namespace',statefulset='$deployment'}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "1,2", - "timeFrom": "30s", - "timeShift": "30s", - "title": "Running pods", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorPrefix": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#F2495C" - ], - "datasource": "$datasource", - "decimals": 0, - 
"format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 6, - "y": 39 - }, - "hideTimeOverride": true, - "id": 32, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(kube_deployment_status_replicas_unavailable{namespace='$namespace',deployment='$deployment'}) or (kube_statefulset_status_replicas {namespace='$namespace',statefulset='$deployment'} - kube_statefulset_status_replicas_ready {namespace='$namespace',statefulset='$deployment'} )", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "1,2", - "timeFrom": "30s", - "timeShift": "30s", - "title": "Unavailable pods", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorValue": false, - "colors": [ - "#F2495C", - "rgba(237, 129, 40, 0.89)", - "#299c46" - ], - "datasource": "$datasource", - "decimals": 0, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 12, - 
"y": 39 - }, - "hideTimeOverride": true, - "id": 37, - "interval": "", - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "pluginVersion": "6.2.4", - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "count(count(container_memory_usage_bytes{namespace='$namespace',pod=~'$deployment-.*'}) by (node))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "1,2", - "timeFrom": "30s", - "timeShift": "30s", - "title": "Pods distributed on hosts", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "cacheTimeout": null, - "colorBackground": true, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "decimals": 0, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - "h": 3, - "w": 6, - "x": 18, - "y": 39 - }, - "hideTimeOverride": true, - "id": 36, - "interval": null, - "links": [], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "options": {}, - "postfix": 
"", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "max(sum(delta(kube_pod_container_status_restarts_total{namespace='$namespace',pod=~'$deployment-.*'}[5m])) by (pod))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "1,2", - "timeFrom": "30s", - "timeShift": "30s", - "title": "Max pods restarts (last 5 minutes)", - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 42 - }, - "id": 11, - "legend": { - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "options": {}, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "kube_deployment_status_replicas{namespace='$namespace',deployment='$deployment'} or kube_statefulset_status_replicas {namespace='$namespace',statefulset='$deployment'}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "total-pods", - "legendLink": null, 
- "refId": "A", - "step": 10 - }, - { - "expr": "kube_deployment_status_replicas_available{namespace='$namespace',deployment='$deployment'} or kube_statefulset_status_replicas_ready {namespace='$namespace',statefulset='$deployment'}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "-avail-pods", - "refId": "B" - }, - { - "expr": "kube_deployment_status_replicas_unavailable{namespace='$namespace',deployment='$deployment'} or (kube_statefulset_status_replicas {namespace='$namespace',statefulset='$deployment'} - kube_statefulset_status_replicas_ready {namespace='$namespace',statefulset='$deployment'} )", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "unavail-pods", - "refId": "C" - }, - { - "expr": "count(count(container_memory_usage_bytes{namespace='$namespace',pod=~'$deployment-.*'}) by (node))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "used-hosts", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Pod count (total, avail, unvail) and pods hosts distribution", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": 0, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 49 - }, - "id": 9, - "legend": { - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, 
- "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "repeatedByRow": false, - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(delta(kube_pod_container_status_restarts_total{namespace='$namespace',pod=~'$deployment-.*'}[5m])) by (pod)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{pod}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Pods restarts (last 5 minutes)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 55 - }, - "id": 4, - "panels": [], - "repeat": "deployment", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "title": "CPU Usage ($deployment)", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 56 - }, - "id": 64, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - 
"nullPointMode": "null as zero", - "options": {}, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 63 - }, - "id": 5, - "panels": [], - "repeat": "deployment", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "title": "CPU Quota ($deployment)", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "columns": [], - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fontSize": "100%", - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 64 - }, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as 
zero", - "options": {}, - "pageSize": null, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "scroll": true, - "seriesOverrides": [], - "showHeader": true, - "sort": { - "col": 1, - "desc": false - }, - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Pod", - "colorMode": null, - "colors": [], - 
"dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "pod", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - 
"title": "CPU Quota", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 71 - }, - "id": 6, - "panels": [], - "repeat": "deployment", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "title": "Memory Usage ($deployment)", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 72 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "options": {}, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_memory_usage_bytes{namespace=~'$namespace', pod=~'$deployment-.*', container!=''}) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "legendLink": null, - "refId": "A", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": 
"graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 79 - }, - "id": 7, - "panels": [], - "repeat": "deployment", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "title": "Memory Quota ($deployment)", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "columns": [], - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "fontSize": "100%", - "gridPos": { - "h": 7, - "w": 24, - "x": 0, - "y": 80 - }, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null as zero", - "options": {}, - "pageSize": null, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "scroll": true, - "seriesOverrides": [], - "showHeader": true, - "sort": { - "col": 1, - "desc": true - }, - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [], - "type": "number", - "unit": "decbytes" - }, - { - "alias": 
"Memory Requests", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [], - "type": "number", - "unit": "decbytes" - }, - { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [], - "type": "number", - "unit": "decbytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Pod", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "/d/6581e46e4e5c7ba40a07646395ef7b55/kubernetes-compute-resources-pod?var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", - "thresholds": [], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(container_memory_usage_bytes{namespace=~'$namespace',pod=~'$deployment-.*', container!=''}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": 
"sum(kube_pod_container_resource_requests_memory_bytes{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(container_memory_usage_bytes{namespace=~'$namespace',pod!~'.*redis.*',pod=~'$deployment-.*', container!=''}) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(container_memory_usage_bytes{namespace=~'$namespace',pod=~'$deployment-.*', container!=''}) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=~'$namespace',pod=~'$deployment-.*'}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Memory Quota", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 87 - }, - "id": 15, - "panels": [], - "repeat": "deployment", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } 
- }, - "title": "Network Usage ($deployment)", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 88 - }, - "id": 17, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_receive_bytes_total{namespace=~'$namespace',pod=~'$deployment-.*'}[5m])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Receive Bandwidth", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 6, - "w": 24, - "x": 0, - "y": 94 - }, - "id": 18, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - 
"lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": {}, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "scopedVars": { - "deployment": { - "selected": true, - "text": "limitador", - "value": "limitador" - } - }, - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_transmit_bytes_total{namespace=~'$namespace',pod=~'$deployment-.*'}[5m])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{pod}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Transmit Bandwidth", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "refresh": "10s", - "schemaVersion": 18, - "style": "dark", - "tags": [ - "kuadrant", - "backend" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "allValue": null, - "datasource": "$datasource", - "definition": "label_values(limitador_up,namespace)", - "hide": 0, - "includeAll": false, - "label": "namespace", - "multi": false, - "name": "namespace", - "options": [], - "query": "label_values(limitador_up,namespace)", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - 
"sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "tags": [], - "text": "limitador", - "value": "limitador" - }, - "hide": 0, - "includeAll": false, - "label": "component", - "multi": false, - "name": "deployment", - "options": [ - { - "selected": true, - "text": "limitador", - "value": "limitador" - }, - { - "selected": false, - "text": "kuard", - "value": "kuard" - }, - { - "selected": false, - "text": "redis", - "value": "redis" - } - ], - "query": "limitador,kuard,redis", - "skipUrlSync": false, - "type": "custom" - }, - { - "allValue": null, - "current": { - "text": "kuard", - "value": "kuard" - }, - "datasource": "$datasource", - "definition": "label_values(authorized_calls{namespace=\"$namespace\"},limitador_namespace)", - "hide": 0, - "includeAll": false, - "label": "limitador_namespace", - "multi": false, - "name": "limitador_namespace", - "options": [ - { - "selected": true, - "text": "kuard", - "value": "kuard" - } - ], - "query": "label_values(authorized_calls{namespace=\"$namespace\"},limitador_namespace)", - "refresh": 0, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-30m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kuadrant Limitador", - "version": 8 -} \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/limitador-metrics-dashboard-screenshot.png b/dev/limitador/limitador-server/kubernetes/limitador-metrics-dashboard-screenshot.png deleted file mode 100644 index d674b48e9736b379872a2506a79bef664afd4cfc..0000000000000000000000000000000000000000 
GIT binary patch literal 0 HcmV?d00001 literal 70285 zcmeFZcTiK^`!5>1B49-X1w=qPsPv8^A|SnYK@jO3r1PSJf(V2ry|+LTDWN0~Py_^| z_uiz2&>?iriiP)kerN8?J-@kU=KOK@Oa^wc_g>F>)>A%DS^K@Rq6`@cH3q8s&-#x^UbsmR<*LT3D6g;w9rut7qi<|u@Kg)p=o}4=2ILv`Ih~`l^ z1L*yIc!kt19RGU)ffz|W`S-V1RP;1QKaH8aaXvcni@DTs?C8w!@&C)4G~&Y+dd5Oz1_6b}Q;&cNaxC4J&d~2M}y*PKQT>=%0ltJaOZ{ zefui~HT4~dsrdCX4Kr|v%9bl;n)1G8bgy7^M|5$iAH1L(ll48^l`y>? zV9l==^)kCqo8y|$P4c29mdwux820e!;vcRL-a2`Ty;9rG zRkNn9Qaif00dHCF-BU?_9MSI`NzY0D}BDb zGVneR51(g`wGPMQT(l=)5iYV=CPhZwrM3Frw0Ya%BX;`*G(j=jQ{-Ulc?`dGUj`0Y zrI_rqll!72K_e|>t=~`8XvXLPAwEkn{*J82%&^;f>4&X}NJ(NpQrK-ye_UK}5FJ2% z1NP;DI7{|;SDJM2RSFt7rIFWzUVWz!#<`7N!_E+2jEYrHetQC)uX>Su+fxj+>jE&Y;qCIN~%vnsG%8SW6#L0+gA~V=H=L}%#1A9;}Ghd)uwak z$rce_?>gtqj52>@Lc&e5>&t%Rw4$@^c$6;OWBJR2#+H_zu`g1Q2?_Zg8}oDVcGwx^>Ck=#=$!(#))+NJji!~dVz8z-f!>-> z(fr!rYgS+U-|lSBxSQ(>{AGfqi$*T}FyAnmjXpJktNiC>g+%^lnl1z9cIkOHJtNtU znL~jGVCJwKDsKD6yK;w``tBUpg28fT%5&RfaWi}6qHsN|T#L6uZVIWsg+F>MEeC}v zF~$rkn!b7zyLQ!3Oe4IXC zsE&!3iAmb-=J#-hjc=1mIfsdEFz{jHY^{?11CQVF(D7cMvSIhEw3v+uBU2)F9mbd2TpDn;D#R1B0+!nf-3 z8e?|G6X?T-QcX6@oMH;lt5j5gf~Q})Uybw$=;-uAGeT|%%mF&d=SYw~B3c$ybI^?C zcgvS2;hK4jy&CU<8SI=?5>1_*eQD61`g##NLrZp(S6L>MW-3VdFPZia3e{gs=H}+c z@owvxnVRMp*PKQ#jbP+YI6^<@U-Y%$z)$~!}sssTlN=b&UeGJsd!$@E{`*ioIRUUjAzRq$GtbQYJ2^f z*Z3<$d@ zTvhtOZSJ@>y6IMltMJDwxQ%+pn53mgLF3X4D-lsO<8AZZdF(crBj5i+eU3g+{eI41 zxmB;b!GqpR`5D#8X>M(0pjKbsp6{M&Y?*I6AmOv|C~$OiT<=m-!mZTPbxy^Ub{_2S zZY=p$!HA}v?1q3NH=(_ru4%$frFBtE9cgQ@zk@?ImQLkrcvW~Noip;uE&kv3Vh8w37I2n5nC zoq2F)c<$`%zh7|&{tMI~X8~^gdmtGyPW=5JQt=^-{ODx){=*}6f>byC=S|%I_uaJD ziaC|7rkHJ@m|gUvA;36+z;-kfzw-_5Z=C}Y?vqM=`I1_wz1Nr&_#YEx8Lz-Dc8uGz zu&|U3`TffPNsF27cxOeb(-Kzy;@>?AM(ckc{NIDTe{iyL05D-X92zq=PHYaxEE{vU)NjGQ5d*1#&r8KQVNA%3sz0);{&v|Gjz3R%VM%FBr{9EcIe3xmVk)n zHp*IG|KK>7t5cMPO%_*(rHUQEdV7|y@!5^&8~O6602A7c_12R*6%CarqXVC;sCH%Lf)dZgXFqV6Uk7k(^FEV{Wvy?d#jBPue>D2R*xXjSVgB z$f^TNSJv+6s;a6a-*I>E!+nCV^X%vTsWR`4shFMF$-^ACrI8(=ea&1%K32Qm{v~pL zMW0P-NjN`MNdnbJZ=$XEOn#oF5pmUz=eN0!DZcDMz^df3GM}4ksR 
z>d%Y}i;c}(!p6gnomlMkKXpDbCdS-VmPl{QiQlnF!rRwpE?Cu)^!Ae4jwWSrSH-17 zbLzbWK;E+Nb1JR-Ph&nzaV&7sS?8|YdD-2l0nH$%5q`NG_8=%Is3&do+T1)s3s*1X z6jf&Yy6yv2W{jbSPM#KAQ(Zmsj#6xflEn5?iM?g1w>F#e-D+;5PNBeoZpaVy<92S|wW9+g+t`$d@8P=-}W$ zEo_|=$A@`TADY}o81+Ra`|jv)>Xoz&E>*xABA9WF>_}C3+&zG9%168jVzM1m_(7{# zsn--#6qx{Y>y>*C7in?BwXyo!+uMr{D{wt4z#bx$issURxyBeFfJ|7<`({ULmsAXp zxF|kpOZn#l3ya{B^OSpO+}KaI0&7w?!86CI&vd02p8yz0J2~`F->MhEsWAwxm0$0B z9!kVFARJR&U47C87 z1&FZNb#W-wJlXp(%Bq(bh9r3&=m1|nmsmO)RWk?t(-u{)hDtmcHE)}Q%${0P46-fa z&_mVT-Q8m@lhUj}dwn)N)Z*~KduN|8%C}cAPSK(ly}WE+xwE9TQFDREd6%X%Po|ZY zqDM8yqEnvNeYJF;!d?FTCY!FWG(O<%TY&kEB8F{yKi#uqWwi)Qa^Ixtwo^7rO-=Qf zY!@^S`MhDA!jsty+0myRUyXdaQN0FvfIsZFZ$AW{F8}_0 zo{fWDI>5l3AKn)MKn>=`Y7YkOWA(MQKN)(=Nl(qp;Pwd;%C?4v@A2Hu{HWU6+PPrv zJ1@t-8g(yi`pmZ%1)7+ctWU^u&$d>2ZXm;z*o1_1TEz(;{Qc#6KgGu8?(Y)V#l^7+ zo)-X*{pS7If0_C*j`g`g8SkCVaK@>rDT}Xf&gmAIWFoLTmfbK7HM~Hku$;Y%bHaBpV zMkk86$~o?9>gs5@0E-uiIS4Ab$IhcRK0Zz@VwQD4Rh=fuUEGca-8dWaHMGO*xIQ4+l$ZIyp;+@=uddv#<%vefjcb z$8?BUPHrGogIz0E3u%~X`SJbxaA2vR5o$w|z)Yn8Y)71?4M2DDF&y0?rykfioI97; z2*=~zk3NBBJ>AxnkLGBfsFm+3cQSXG>(F73=I$+O70s*kB(O3tNUhB@6H$d)rN>5O zbhM_WST1WWOb=;WzJC@hL5jz%KgMo(&@PzQeA9Ykp#^hwtBWJNEu|xLqFaWT8-%eqFvd>#@2iGNh_3nP`g z`7>e$j*f+CX=!ZvnRhds1QX3lRN8mghwSDM&*s0aOhPk2xaYkyXAaFMu^E(w0z8qQ0lRzm_@ZLVl@1OL<_A!DP-qEmwjdpvAza`xv|DUD z>>(!}o)(mm!6*;C&A=ePA?*MX7toBh&&q_duX*zz#gGod>nO}Zf7gre_M-@$qG=2faI~zB0=9muOfBfKZ8UcB-Jn&f@|L}pdaOo{r~R7*8G8ueX&|g%6Azs(uTXHR}a+2Kg$id`rZq)l2nwzK9piGy>8}(HMF!@Wo5zZz=wIbR1${}4gKw`AS^F-OYpj7 z&r)e`i0cErB}PXe$(QN5ST18TSkit)i8KZ{WthfDWgI8g#mc}*LBM;jT^ot`rF2uj zEiPn?7nrL~Kn*Zo447d=u3D8AVR3l9vC2pVKj5;yV}-N==Z5fH7u+ilmfhG{Yn7-( z8xm=WdX2=KB>wa3sQB=QvR`Na7Y0B7AID}ty222Km1v@xdf@Q)79EI|<0osxP)!Im zU&l8jGaDCI%ZJNHF)AdKhchGi-MhATmlz`BBl2cvKi!{s9~9If&s~*qL1IhUb8}(E z@$DaO0=qc1EMQej%=%RF(C&c`(qnTWHS(vrY{v&WALSR115T4u=Kup% z`|aCH;CiS9ToV{AfEIV9N_qBM^DFFGrh$OK@V)21RSs-jaTwxufN{H;l0@!@A5WWO zd9sJL{r!)xFMqxS?47EKLMricQiNMIHC?;9UQEVkhu_7;sdnrNH88QMu@+A)-9Dv=rLE1K&kw)C> 
zd*$By$$GP*enwe#O@#8Ae7x1p`W&&a&}J-!dI3@}goU-Kp8jGBwNPt2myXZI-uO}9 zwuSp+X6qQcv{7zr#cZhnl^s3goZIN5s$H&~5djT?5BHJ~DMjAm{zoUNs@*#~mGiJ4 zGk14}1=^Re!PqK$^@cDmkR=4uRKfMEP;Vo{J${bMfa$u{P5JEFgxC4TwAjApx{E~*V?X*36n+ur zePw?UGwVsD>CW$g-yi)%>FsNr1ErHF9>f<8;^m$Wr5x@r`BLATSmEI+0s)o_&I6T} zmd+%*Z@ZM;)}{<>hsL>c=SWD%qq{9N0A3qz>UtmI5?VD6%R|}h^1CdJ7`#3skpn!i ztT$i)pONp(n6OTPP3}!u58B(sX#tqeMmlxaD(iET!go5t)B* zpj+-3b^reT$jD-;U`p=xG6y-Tm{X*}GWdH!3b6Wgvo0!+cw{yczgvv!@V9+ixJdW1oy65ov z@)d!|>jH%O^0?i}A=Nd-ofptM-aN9<^>rR43OH`dLF{nvJkvC)-`eAF|3Zt+^P?I0 z6)7`SEnYr4FAIzHVM;6w@o*~M-JH{!_*cb8s-ynRApJK>egCcc;QxQquhsc)vOJc$ zqdQMai*TXL$gWbXP7Lv)IWA7r4C+}1YL;T8DrSBB44q|bl9(al;?eavHxFLGw?G{Z+>l|}i%2)@-DKkvyL(re zyJ`>m;%Ra!(5C3P5rlyr8SVW?ATles9w*_8#U}F`K%GZN zmo&ftkDbk0GanAkPY$JO)MR5+TGRL7|265joZImpH-Rf%T4d-?CfUb zQoKNVJ!b%gn|<#~&J4o43-4{N!LNzMeGgw+){gJD5F&YhHnP*=36wPb*~4)%0$uuS ziAo_^O<2OID`c|$7Ud-57gn`Hppl75>0`^){u>k&V=J>#_J4Z;ur`7QzKLetI@W_9 zz6vKcDz>_m&anmz!-6$HhEwXJSN2Wv?3>%UNOt5+g8IZr&eiaxkDtk#6}I(LIfc;+)t4xejO*BVS5y3JR3 zOtm=*XGBcNR;?3#0yH=iUF1c{O|MoVg@zG+rTA^k{e!A`KH|VqoH(vMN zagc+qt}U!ySQa^)2rAG$?xJF={$;X<-VjwDVsa*XK;J33UzEV54qQSb4^hKgyiN^!;lP&P)Mt>$y>0s%A3D&ZSUnmcw+ zwW7`+J+wjbLrq!T*kDoj{2k#1^6hO;*T9|+zZ7(g-({5fvR1KsosHno z31E2rP({A}sgq}36)W~0tjBb!IP1G1OUHy#w)h1TAGa4&z1;MVON7!-%}h5*R2OAS zSi5f*o<4f47 zWng37vIYF1=ByR2@0c6eE!VW8zgm?KIA4=EOHp-55hChcC>N={OwG3cDEh03s9tvX z46^J-+W8*5N982i?V6{x2&4J<8~=K&aT%@JqFIoQ)@H8tslYRyV5*)O{se=T`HI-`#X?c$3vcfMYHGlYYWfS?wndOaJ=uR!O&5(6ynseR_uwV z$18|Vw7VWlLBpW*2BxNOR&BN|U1g+7vRz+=|pwp>kbW@aV}1=VU0yW?=LFN{Vo zcAw2n^#HN`gVzC|_=PGgDq@la^A zR6cGM+x04U;kOy+Vi?p*>oC`xh@K1F`2VHf1~CoNY<~mDyo~^Y0NVD2A)!Ud8y@m9Yu1v=|68@2c%veoO71b zdAwQPJ?Z@Yu3Q!Chxg=~a&o#~Zt%#y^*yijLwoP1T{tK#|74~*7O-pjk^PCIjq3Of zNgc+1CV4+|r8gq01~?Yc0_9h?zn~o6+Vv^jK$DyP^zD~7fmSX9E}JwiJ1a2% zevSbAP%xX6`cpEqgkp+>oO>jSbvvxptzmnlI>$+r#;%}$j1#c>6e*BgT=HchWJTTYdh;P(*i$>MNGHc zw_cO;2kzxRURl*IrD=hNdM6$X=?WIEvv`Je4rgTx9#1UP4l&u+yZ(v-uarLaX%W%w zeaPaoT5c~Sb?{NLe@zS{apJ2~WMcFT`CHQ`*9Zllw*v2x_Bwg3JR788%==c*b7^sX 
z?~J~+t4_WT0#e@R5$vm-kwZ>#{*zL&=G6~_j;m&5oA&P#%wqWZs7?G;5Ss+nqe&`m}*HZST_&YsyS=?3-W0Iv+== z*FDMZRLWL$64T1an0=?5C>pE(x$*0naLl_j)gTE((J;_dBmI1) z$sMMBiOsucLFIA#VYZFoyDE*xNr4h=FA5qzQ<>2+vkO0_4!~_!%$|57UE>$qSe|4VO^wzv zO5+!cx9IgxZ)_^|EcQ045fT}d9yJn#nLddDd8_vH`W%o22-)j>BDT7m4mxW@CLOEx zOP)+h+%PiH-`^oKbxYWOwlp)tTyr-vJUn&0hE5CAsk%^lS{vBhQbYIcsQWSf0PFPI zw4JKcDjnR8>=Cu`|*qxoU6$3gc_(-{#sm_&U}fYW?cnur0`eXqil$1|iR3 z=etXx$qum!@%%8o-aHBlJUXFj6u*C!ni>Xz9yWSp ztHDj1`H@P@=?gx}pk~uuRxo*$j?QRaEe_&Wq=m64j&rlg*Ek$Dab^7lu2M(@H)UYc z*h>8ee8;;8wYD&Chsb`WV}7i#pHa@W-eN0OAyhoNrR(L=MK4;|+_0dSXSo__$M@m~ zi_ZQ&%5jPfBqv!wAq5nK(i|qmRplIEd;7tTvLKiNseKg}6Duo_UDU0#MCEJdfd~;^ z@LWPu**;91P6E4tR6GVrP%#}=c0pIZz4p=I`Cqbv`d6E1_sb974U7)x&(-swyj1%o zJWoO>**c}rAO~bQcH+cv4YOiGo0!`uj<#eF55THv?i2`{SnfGUy?1c(O~@;kXyN2r z%CNg*0ZRLfvPu$b>h0){biHe>RmvuY0BI6N566c1PeaD8UA>lKSc7N_wc`}PT>Zrv zWofgl7gj5j?=6S8G<^@p9&^SQ%VM5qT) zsMByuVm3OyAi?&#Qm@W~ z^XWr;dOSYQSNwW2Um`6;V(~795t75JFM9^EoP*_d?c|>9`5tX`L?0Q`l6+^y1NtbT zkXNK2(=k@zQjSEC+lU|-L?W8GeuuXDe&n`72s%T9{Mpl&b_Tz51?ArWeimXsve(Q7 zfhbp^mIn2y>Whl)CRL6`DGPCOV=J`%ggEfUJG0D{^)&a{67OjtCctL$&06o==jdpm z*!?@m*1L$nPL%zEC7;5}l=y>gX`$F{S{n%oZX$y@qc z0Gop=^bZptbj4Ov$MA>yIkzo6H}kly9-Wy(%LvbB6Er=F4IqLv#Hn#xT{O}on|zL1 z9UMIx_45#E{7174kCOIbRp-}d`O4FhT#-UQ`%8|cTo3LC^M}RgI)qzx<4lhK?y#`` zBA1Yu@dh&imbm)VN095b@A+yKnA0Q%N6jAg#2fr>|B=je=t~?}UsE(_U%C&NVt$$Sy>d)>zP za4(ox#}S&~i?QR=-E~Diqa@sgs z4dBv7jBMQOqYI!Hns`)jIq++B9?Q8&08piEC@)a0H)1o>Ba9!b~p{R4e?o+10>tl_2FDzxy zG;Oj@o}_zmdW;GTqTI}9dm}H7Qrc?n8yT%lsZZt3$v;_&Qu#-36It~S z$O`*eB03W}H`dOun>B4N*wyvlrlGYV7D_>#PFSQwM8kn*K38Q>8$W3OZNU+u57|-> zhUC-@tlMURaXm#-e(Rs41mIe+elV^Gw#n~`6+!in_;(@ z6Gv6%#u8TmaBwzPjOZ329PfWkruW=z;ku>szT-iVmlJ}&%z?^j71Y)HwwV~Gq zt!+)K5mEi|M`5aG7+VxozZ$H7QB&f;!uwg~s2xVj{bc*orK5JUKlZjzB5bsfsKz;U zKqpe-EL;q(fayHyw{ZWv2xCP(ts|)|Zm9cUg}bj4hhqQ%3aadmqMlFxU#kic=M-R2 z!t~x$$K8krbMmtc0%8dZS9B}s+Ub}5D?kw0L7!cQ(5 zug9J3n;iZsujD@)Jf&->7)Z-G;*eL)?dw_8B+&eQke>-TGgH&VQZ~BT>ep}q#Nqy# 
z?$ys=5K&@b5E}d|W%hC3gk4#b6yZ<8C0}&!_rNS$7W~y!qIS4g-TPFqjfcw zE;b27emwqlrTpxu9L}66)_~Zw4-U4^Q!!$e?UnD3KgXK>oj(w|7&=N7k8)rp&kwBKTpLr!ihhNy0ntmO~pC$6Mlm4K!r z=r7F!9bSv$@7~#S8Ww9Od#&bzfGi(}z-X52dC7LX;l_4VxUc70 zb)-B7L^gUCI7O#c)lC#{0EW215d)Uf!P#BS11mtgd4xo_8MAd;(_f;8tMU8 z)db}5=n~bP)SICKt$$!RnCh_enu6}#LrTyDBM&95X)A&2MMX>sOLFemgilOPqD*Sp zz-9|{%z3i&-Ki$yy6*p_!XUUd^$j#3fW8z^z^4}9b0GFu8DLyAmf}5dQ87lWPO%0U z*TjP1^WjL_Qe^sQB?CfAEc2#NZgU>=2bVOo*pon8B=2`__NW5#KjHE#`-XgppIp@w zcS<|*1)|mEb`y%$4A%GGez5{22wgaO5sh2#RB{@g^nd+&qaiz)g@fZM6aZ@Ei8JJu z^&c4xTy@UBefyRbjG#?O z8x#jN<|&wdxR^X2oaa+%Deacal`x)5_E?V8(@eHIj>eO zhuk29t3uVLmW(3h2k@?ydmX{BD|&i)TFCBsjXX1yZg-Y~RS3;)fxrLj_3f<{M`3y2 z?mLiI15@7dIKo>`(^8?ot`0(5xRPyv-pI5v{XW<3)O;`8c*2A&#e?UeR82yF@sj}h z^yOrdK#d94FJUC7e>(qU=jBcN%H9kc(0B=edt@}1LDr8Cq#L+iCD3=}-_NAM4nm`; z`?_?Gs@E7xA6K35!X+Zzf6ak%NPaSaga>e5 z&n{IWfFb8FPdWurj=j87sy=bE)d%{gC7cpl{9H%4fRGVq(ET0Uop1s)-rQA*ABlO{ z8+4+vsqt?R08qsZpP}53`?}v*TlYt3AWOj4K_X7RJu?q|b9w;#3i;Q;q)_VtUzfOB zN<;|0mw>ze#7Gpa_}X6m-#~6`Z$ZcH2_JWmy88ZIK;1GW05^W;=J}BznJPgeD@SyI zzYcu}(X1T@u&|qi_@G;lYm5DzcCi3FPYwT141%IPpbwBUIn7W`OEt+{ct!?0nu zQ*^*~XN3c=UDBFv^QXl7(JPR@c~^?Y)(U~@ER6@wjECsz9i6cvhE?1{aH&DXPk)db z#9s42+eNZhtVb5$IoADG(L@IVsRF^3**lwS3|3(h$NubrgC&0JmzA3xZ3Mn2;TV`- zB>gWrP$Y8h*D=N|f}P2*?%%c?;_o5)YCixS6rFSkb;8B?H}}jYRxe%eZcrxlyzgC;xj;bzeX@ z%%a|dNU8h3}cEw^S)*j;e=ufRe=f5k8d`U+>%`M{HI5vLL* z7&!U|9#y|(7U5I^gRA6OO@c{F-UAnTjZl=9w$+u3OgVot_>J`;%0eZ3g)*ETyy1L( z#L(g@Abza#t7W%xrTp{t`uW#?AsIv+wowG_+D5zr#3}>6`YGYOwAh(i`j50wGE<~% zsth1eb&$>gl5!C&5nWH2P5x@3wJ=zhzY-5I3>Oq3qB|lm1PFH7IC0Us;YdhXsT%qZ zlPg6+Lt$8g#DRE`DZ}}5*EcmvB#%!-ryLJ*sfDBb_ucgd2OdgSmY_;pKxJ10XLw4_ z3RUvbuF~Z^TMA3Cq`%}X$%kKW>q$mmEac{KYFP@EeR%w&e{3v|v0-cd>oc(tPS~~B zwb-TDhKiXVFPE}ZLp}*sFRJ>zlX-IH*6om3QT6y}yt)*;e(_Jh6M$0O<6nvUEn(PP8%%CZMoL*;i({3%htLRu76@TNS|eM!fi9fackjfF5)$fWx45* zG0i%EG`TueOhdRhTTZHQXTX5uVmNV_58)A~Ew#S*bl-#({S~=e99vcKT*Hg^{oa8< z^44v^Ve$~HS58mssEOO}JQGB6>vKkhkhCx%Aly~!_@kS^pk1POvP7>I1~2@{fk4iR 
z=v58DX{A&nyNZH`Ga;AWhLKmZBJ~a#nKE75KcYPGf8r*{S$TK5tL#>m0mEjHOSNWh z#~>H2bp$?k%oBuy|1|r*l%H#H#OqdRsnH!qKkX##E6eKbVCIG^*N)8lg`7)Zdw8t_ zU?c_52mW5v5Xm@)s;We|uT9T2HfYVqSs}uWpCtPfO4@;pAtS2%CPc9=EFUXejFAi6 zRPX<5RWA0FAco*jfA3?o-EG}fm6Aoi_qeMkk0%;^e~A7WN$j>Gc9GZ>a7e3V4W&?( zj@&`^=204XJP;BRg252t)2oRhZUHcTNcOuQx#{bRQvbmX-)MCsJlWP~D+!Jsq2AE0 zm_ydHTYZw|*U({HLejv^!=uS=2tPe_>Mblcm)Hdi?|xT&3C4mHonDQmAMPtKe>t!| zcV7WH-MXv3RCS2us@l76;5qE#C`j6zEMauGUFF6od$_S5@4QkQ;rzMFcOPsuNP^|T zoST$vRV*xWL1$wY*$on2t7#6F(9&RMy(b&+;W?Lt>eHu@w($-zAPC`o*>+0-{R+BRwofkG!P6?9o{*r=^o{xD z#G7+*4mkX(MV zIXCC0$WkCfxj{XmkX%vr^+@O5mF~R0Z8j_@x0ck3!d$&&{e~kWUI^DZQ0QJnO-)VQ zM>`A4L#VoJv>RQunCmmIxM}6yfEaEzA53)V(V6ee4h8!pbYxXy%DCo-^I~>3@!3N| z6fihEiV@#mgmK?Ju}f&0TzLYGp5f?yH_ou$BC^E7#idxq*it17d-}$gT6_FH>F?EYs$4kJI;B|M2k36_Mt zaSkxf`J5u@;^G3$kUmdI1b$61Gi>7YSog}UH?Qy`MS4*b8uj`8X-nb3BQkdYoxe}sV}G3p>y5fC3?LfZ zj8YOpR~NuNxTnb8G#h&NU1MwOe8YpaCb=h1!WcpJ`DHKSG6f|iE2mAy<;#~rXCO#1 zw<;TdspF5KQ}Q{`RY3RVDfrt9u(7e>;k0Qjw*Q09 zOsAuwd`&~OP#!vH_wSmeC zS;G-(U|cLv3+WdmI%Md4oV(CPheCS94|ILd)2FjyulisgeRyz27mizbwLaw3N_SWX zjC8^Kc4}z944oM2$6r;06w@!gHZfCB=*{M-D#l!`)&zzDl9KslW>e*`UCeanxLcOx z(+WNI1LUn|lYe@Z&Q^HqHSx|SjLZuNYly57`VGjR#lJg0M|)UvrCEZ`{SK{P+bsC$ zbt^vICeQVZ^U!>uFwKAgRolHkPdGUeCsd36S@IPB!|0hCtDntnu8b4Us(!Fi?W||& zcWZe9H~AYJ`H{`%i47!R)Se@5rlII;N-#I8uaRTULoVvn=?gv5b9B4h_UECOY$wz3 z)WV2llgsJAHGMusPvaN%UiL#zSia9d*-H#?VZV>FC-THuRC~HqTw{*)fjpDscT)wL z%TTi_V)Y)PNQ%5X@!K2O!0slzfE&|8s+XiB}*nmy?^C)9e7uSNqtQUR2 z?K=XO{qQ#Mf|}-(gMYJMes|UM43RWe1eSg117MNgTm@96$XA($x#kb253G6|A&83s z{2W_y49sNFB0Xk6dcOm_f0oTHUmkqb6L8At$jkZNEdk~+1QQ-w`RFG=ZWq;c)H+e% zE6iU&d2w2fe?jZ%!${S@j6Q%AE(6N^v+BdH)-pcIHLdYE8nf-(PcuqZphm!P(*u}$ z>t$h9mQ{0t$ReJe!$45dGw4DNthnb$lALdEJC(j% z(5U%oPX#7=k^)TB&#COD@blHX@c7c&e=mHPH#(Z*An=Q6>VUN4T(~-C_(96|#XspK zh_k9l&!WadOG+2Iwr&8=Qs}`nm50yVVP!=H1IGQUIl={e{nCwb!i@m!{Ju&@ji*>R zzUc4HOy{TDBbGEn8mXx>8vr5QVgab>3eXO`VZrY}!e^i#phFkisl#rA);Ta4=)ZxmDSaA&dAKAZnj(;X{7JAsyRZcGD 
zqCs}-M(=%2czhKT(0}}wSZ4?}{2Q%7mzB1FjgHC@poHB1ZqB3i9kD~G4s|(kXZ8Nc11La#OSqUht>ndxYNm24GRyrUbyRf9F`8TQhSHHQtGIo<=bM z3vvnQcD1a~+Q9fgnSFf4M%EF#BqeQhv1Laa1OeQ~N4q>nYnrd!i}`bbqQP!SWW&r#?&z#KM*a&cQ9m&2 zHxk*B>$AP}8jlPa#2o@lI|q?7)>ahrfnOpMq;BtHCW9Il(ljp;c>@ezqd) zt+T+WS5b?IZvtpyyT+-T<9qPDgwbMW-2%PdA^&s5gGm;-k9Pq*&`Mg0SXJV?H%O&+ z)+0`N?BYwMG}ziKY8f9}eGRQ^RHN)WLz*|xQ71ikLio;~8Nb00^&}xKRT_x^+LPe_ z@N$RQix8ygzhTM|X2RJhtK43>@3H9kMf+q5BDAk>l3in65q^*EB|pE34TL@5yqbm{D^ z&2)`Rgk)4yD?~-!YdiE}OAq6Ze>oGJk zrc{%m<8d*`T-`@8sm?5=7z+>3^PUa*bqOw@Cw^`Khlr>#etEFBz0-PF$soIfTpG;? z=1$6Zoq2$BCsY#D4Kyn)2yAnWo9NOCRoY6l!%OZm?aa_9U4B{|5KQJkaRA9^nC^Q zhlO5b=rB_XnfYPW7q(Y!TFNaRxb8gj>iS1p=}X!CoxD@_f**AiM;Weohw873-*?%> zmTp`Rn8Yo3*9%AyzrJAf#fvn3Df~S)P;-Mev~JYCWb-FYA?B)Kw#3^v3nFu%ig8b+ zs&a-Fw9Y%NZhJv9Vv7#)IpYQLQd1ugM%+RX=y^5`EiF0}oC%j>#)NSdNVak7(J{|e zN)`uufXta>4JObzZb(D$Mx10vC#{^45>05adF$dd{=&j2cWC|LutAekwAHueOCB?c zZp6J*dgengZY{6e-NnkP=D{KmVemA0(oC+e-+P+4xs!k11|sv2IVclF|0l(Q~CP!igZ?uA!lUUTaCV`tj*r z++7F%J4Spu7bMn~8-~tsa~Zgoj6&sOlwke6L;CO z3KD^9^J&3c9mN(B-HlP~d>~>4k-_)7A8svB+`3z6`&4V>Onvg-S4R1L>@RRdURJ~8 z=pCk4C7s{8^U(3m@HS-vGC%+627XWrSSdMZv1>DR<=$%YMyZxjcZ9E(s$zC3T(3$M zZ2p?Z;Wnlh`aO1HB^nDe+?{6fGE93Nkm+TwB1XJ$MjopT^;53pkr5GjFc=#J1%=hh z1XCOzPMU^;Gor~wFlK$*%VXB<0!*e=9j;xV%I>K39q$U5-5w{j#smVBStPRJc9ZZ< zI{!cRZRS5M90h3x)V$h`5Y1R*>2!@r2`@{H;fs(CJmH3;8VXh47Rp{ z{5>>1x;0Tm2_ShZryms+e2q)WoyZ|4@xl+PjmizWU`@1e+ zGeNO#@8rsC`Am}CZXLv!(Q2N>doxDk2yZ|xjkhJS(QxFpg*kYm+_zVhxQ;6_GSa!7 zyS03uizeU3-6#)H_~nS!TP6VVfm{uMd}CKHLV4!CHca}yO52`cX#I9VI5lQr7aarJ@awMCB1|;^X0R)$qFX^(OnCr)AO3@>#`jPZ`D}`n>E51!p@i96f zxa-Zhk|FGf6{~P{XxIj=yb7Xj(6%^#>N`5*+6L;tKt9^6VKQQ>g~jM5 zRw0@IeMNc2K>BPoM-j%Drm&i@AucKM8cTf{M-ir&&ah7Ua(!rLn7(4e_+521bGCb2 zpO?Qpe)cCriAP3dzC^uPP zmFeNq-r>#WrVn*Z(KiFCxDJ;{8&BwE}wGrO^wfkXc7i4Fj zp^77hy!Cl$lZ3AU`Qa2x%+)FWpN=^zZU^Nf>ucjv2lrr>nHiA75M5o1Ts?o^z?~%{ zI6*JZ+w;SuFK&OXN?F%9SI^`bVFoJ|jb`5dALPAvSd-iLFNhTj#fC^f3W#*2NCy=G z>Cy!RL_}%;snW55g&KiKml}Ey0!UX;5s*+rC!s@t5PC^SGCO)a-!sq5z4!M#b7!7A 
zGw(kb4DWv5z1LoQ?ax~K^C1`|GElF~os*X8kfh3Ma1t1c{FC@qb6N5XPwfw6DA_c* z)PHhLg=t}BPbtVXUT^D8K)%)jJ5QH{iK&yITqf>A?YC7pj%rFmZeG!Z{%h~@PWc-#>IQzbz2#htZW_hfWUt`Rk`2B3nIL(r#Lk1A$#rfjcHQDfdF{*(FNy zNcmL%*F!ssU(5GmoOHd$9M}sBWLP*@#-|LQ6*&5}1U#rzymYH@JHOeEyP)*5p-w#2 zp;?wPC>lDpQPDkF6K8v;!|uEKp(>P^rVo2>FTTmTXmfN$MWZ}y`q%??;c-W**=JCnPxrVqDvcPZWWnAb;*lIV!27C?8= zq!M+uNrs_P9wX1CzE1Ix-3x8G_!Yc$?S1FI%MwHmYTAS(e@160=L&JJ1#uXWdwLYD zpU!X6Pt3rYpbu}!$=-{oU%EiU2I}X$!0sarY+burAAor;PHF51`>iRh*KgGsJp_Zp zjBp3i*oUwMB{nG*m94^lVbpuCJ(FHXDRUl@B7Ybm#!NPchE?rZ^->ej_Z4=VlRv)u z)NkH9&U~7A!22szqi^`mQ1>EorzX#O;ne9aiJ8c?{PROYdm1TD1-r~rF$1xk$muJ& z$pJ>ny|JBA9MtzN3>iQBC*F2$M7(;!l%Uhj^YdKP`8VemJuda`9DCO6NPO1g$mI4Q zAALNeV^JF_QnP=L?SQqmr`(>nw>$^+o}V1~d|WR$Ah8jVYL!Y+SxI9GouBrWO%C0n zPPqSYx}^sX%}_tIa|oWFf7t(UEtxg4S^O`qU=<}!Sx)~9NPsOI2TMeIVh>027p)#L z2<3nHhV`LEgRGEr<9x8}kt-ZFI5|$OoyB$HUc(W-j85qKOH%JE>z^+8c~+62ylD1{ z?ECi{3L|}gxM(S7Ogk~eyC_oZ!Um*fHivv* zzK~9N1tuc)|fgz9e%!s-3Gte6zd|TYDTm0%PYy zqV!Kxs$9Bi@uE%lNtF{@N6S`TOL$&v9qvUC!s+lH1-EJLDHrv{8{^e0Vp z`$)Zb%_5z5s_x)c+WqieVad-!JEQsE6w4#O+ik_U-!bxXbj`=@8nw$2Zbw?aKDM&X zA7klR#~J0$;3ywWTyzZBb7BkG&9LE?gYPll(`|1n66ODP2&rSz$F3FW@*#u$z)qgb zF|CMgxD$I6ti-g%GE_E1_N)8Kt%Q~UGgEr+N1D<;&f8cv^{KVICP|CZIp?74hyEtx z*gh~e@DO-SJaqgva-`?8mUeD}p?QKneOvBagpHR5_mF znjy?1{aNAx(*4IIqzSfFj5@n;R}6pY=yyo9U1(MVaXSVb%|*W?)~x>dV`w|W zM%qOTEK7LYc!H0k)>t|C>&0BdVO7)>d>=1~WvA`=)TSoZe&^6yzMt<>uiCmAZg`4e zld$aEI4X^~U42kBqLBO~otjZ_fF)5&c#q0+NoIy|#TH|GXZWM=Ud;LtW|!GBi?8cY z0y)hxH*V2;pDJ~PtJ_>P=GhDR0Ji^MzNu^Sl1QDg9Al3YM=`3VUYB5LmT*LSbkGIu zYrmPPnAV95)FH2#L_X}J0o~VcRl45z<-4$|wjK=+-|$^n%A`mN>1!WbV(X14)J?lG znEPUvJGiR9$o@tB#6HM+9|`$r^=ec@z&ns)!;8_UfwJ8$O@NA+f#7P){;&O;xQC9$ zq3|C1+W0G71?jEfuYHq*F!){#n}E%wI-kPz6JLJq`NPI9A>!IxL?Bqdz02aUsnPOj zu-P7;snLM>ru&<_7NPfGmN)N3WYdsj81;VY>!!g>0Ke9?Ul*Xg`Thd9K#{gqv2SB_ z_p;%Mhp9~3rzJ+&6$T5G#}4fAV=%|;N6#RV4{Lv`3lbW(cbqYpHP|)LoAfc!n=nqh;_oo4X zC;z0l0M|ywZu-KWT%PdNI;~OX-R)Q{5mAS%?<)GXsAN$gUB>eX(-0VGP5P+ABE666 
z?5_~?z0vAUj@n|q1rk+st~&P-?^rD^%TdYuZ8J6Q-L7&8<|mtP-14F>z4T71@`B`o zRD$cEyPw=V-w|BcWtpHGq4*KQyw_wt{y32~e%+rd^j>oeUe4xE^le;M7KZ#5pZTVd zgT?m$F7gaJyPz1ZY_o`-a{*vNB8mgo(}7*fu-*1=G|MPib;O&c+Oie^h}WqHS+S9U z6$Mz4Hvm|#o%51qO!I*xV}c)Nj^%zje8r&WD?_yyJHBVI*UG|lygSRsTIuxHdw-(= zcq1e6;N&?p+~4|J3%k5{M?uD2?vlnat|+YSTsIN_ggnjryt=mSFr=Mi!3JaMuidqz z*$v%!t`pzE4r*Y5d5639Ru8{=a_n-yn%yY7Qdfcf3+v#j??rHm>gC^7d!n?^S+v6j zkKRLD^MBjzEUWYJj10J_u;JgRv2GTKY#Kc_@8cUa71`inyfS1~ORQ$ipMR6)oWD9~ z#q z9XFL=9bDg26n{{4swXu$#2$Hg)8pVqfHQ{>ix;H+EMhy9*ppoH+;`%>k8hj=JhBHm zFaY)pzZ;Z*(=x}j%Iq{oxXq}korK@Ymp z7EtjWQC00+gqK?-fV_I!vf%|&XK3be^%|)xgwcTmdt&f3l5U#hXm09_4CNnA;}xyH zn_Y?GEJjNCC1`k=`Jdt8O;Z@n6`UFin5O?=QIE=2TWqi>Pyd8s0#m?gbX#JBVy6vY zz~9XQsD3k)Iq0Du+etXqi7AR_rsqYrl~%OhCBJNq48(d{8}riU9q7a0z7vDjf46p` zyKo_=vp-;S$O18n3b2pzSa@9lS|dH8NG`E=maoSSy|_8^!~$}s%tS`f_k)i6I3~wk zmP@D&BMM;}j4cD@MJFE8dbMyHr2Ba9GvM)E&lzCfV?5hu;yq;m$87y!$WZeTy!QpcuMOz1hT%Vr>Evf_j`}X!es|4y5_h(1)3=X8$UT)4^#krXbB(;gI;cB)Q zkc)KsTE83hSZEbp6RiFLSFs;(S;8^dGx<4vpS}aZO!a2@aEn^cnKn)8iS?V65cEi^-l?Sv@IFn}BS7cD4OK1*_^Jje5*gQHnGDR{;E1M0p4FkwgCthXOJpr@7=i|Hy#u2{O-?IEWR+$Hrdb|?bCn_^tG1G zjN8V78KN)0{`+#kINXWDBt*hSsCtxUU_QApTjh}luOJADmrfrxv zY8+ozI_SHvd>*Z6ChejP+?dFqVz-ydZEyWLNM4|?6SQ+xxTUZs5E4*wqIfh)-PT9Bt_ zQPvME^~jR^!dqBlUqX|-KFAB|RI)swI_lB@>Jx`iHOjiXUNY?)X~WF7+~D35jpQORxH|^>wf0^Q?hjr4ha;6j4XCrzENZ!Z0_=RqI@c? 
z)g!u=XdEB#ubjIVrs6DlBWR-!ls`}pHUv{0)}(iq#OMW{#4?qc-*;x+@eP;$6YPzzoD{-;Qd?|1UVTTu5 z6s}rUAd1-vQNyJVHT`V%(ICs0UM)%2fhAoqOk_-#D)WI-rF8UiWTO6*w9QD z7seW3v!TN5n*WiO4?GKytlZ%G4ZyY*8A6P(Q|5m-UmBAvtXZRjY06s<5MZt zMtO*Jt_^{QAElfkfqjozSd63BZXnV_GyW`P*T%Wn27z0`72Bf1tRw6#*kjpC0%DLJ zX&P_({gDv&xj?)rlnGzZQ|oW(-HM-Ec*|-A)|cpKu}qH5a#*ecOmm&nCcT<`*DK}d zp38g@CkN@7n`1BLke=0m5pr_6{w$G|xB+Ko-yfj#_bYlRQa*`2Mrd#8`&k^VB!(a~ZWr)l*H*XF?y<_oWHsDmanA!eqbTl7bWD>~q#sFvYu} z3yM1E)}}wT00fD5vCBQ*96t6{z5y}tev%lpBe31XV!5Y$e}?tX(qr?3f46@^%y&t$ z(k`Q*8uqwP_}lINBd}$PTjVJ_@{=*J(wAT{a{KF))N(yrKKfvE*(|OGrj5;vp99la zU66b#L0Z~s#>Nas)?jU2I@mI_s_Rf~|FJSCTciD0P}|VC9ly8@0#;wam-E=p$3HEW z0#~-NED?jTnl;$w6Lk4#S#8CY&27afYE7U;7c-Ef1XieDG1=9U=UStn72#XF1AM4qV7_+Z31W7yzF$F7LG0yy`8&%EGnkVjZs+ZbKe_&b=Vqw_5+ zd<&vg=g;hjth+vKT3)zhk!s2u>DLO5Cl!a;z&lOgL{C-zTmhLan%(wSHcQJ^#8k~D z9;_EjRlFg8^Ne4lHgG4xVuI>&)H>?~lFO17S`W z_r>7ZK-Z*?GH^C;oAZspdlG&84(A1gyNj+`(`&<>w(l(tJ4w1&ZCz)3r*(E$NoN1Ll=hnfn*RB-*BqgPgU|ElUkfw zq7C_d;(Ca3(pd;qv-H!sASShF8iPw)*0*72jVNX+6H>{I4ws3O3iT$LP^u@|rO^#7icw@mqyg$FHu_igp z`uAZnI=b;bLu=6)*9}33Y<;MPfta=v*-C41t`FI6#V-A+;3=1K$=HGs04+^7WwOI=7@uYIz0 zN5*ymCX^a%6|!p&2y_>xJ$`YiP}V~UfKGLhB!ioLBY?Z-l*ES{+r}9Am?XIEwu%B` zcav(jF{G{PlZPFt{F=#+Mi|n9x6IcbD+Wvv+#PW}_UADNjyxaCaRkm5)@$!LQB-sW zUoy~zVcMKuxb0k?*~b?VaFIg`zhLZarYg7^z6DE>zisOjT2t^WA#p1)X}GMT=ZTg! 
zy|7MezRD^~#=&+8V=LC3PQJfSUD4f4JdDV!v?OQVeQO)cNdxBy)+ci58|V5k5=Oft z?9b|C%$z1glR&|ZxQU5{i``6|Fc`y}oSW!=PD*Ln-;D@wjmuPjY}nZBYj*8d zkDe$67G@0_--mq)!IU`3PM7WXtK%=IA2v1&TNFWDk$!kHmD64!VR}nwbHJdmz^@2- zB}}-a%=`V9p27}gu1nkV*IpQ#+H2fV|7VULi3ZrkH~u{2XlM=9DZGbR=e^42HWuO& z$zIRljeizwQeUa3$6JOl3k*sG8HHJH?il`Rg)+OVk44lDJ^o2Ntle!Oz%2>7#qLA& z$%pYv!(%~JW9ANNxuEWDewQ^pfzRjFViu+$puim)($-;>IcyPnto~mKNM22g^oqN@ z;W1qbg!=4{A7#hnr%Z!uCbh@>2xLeuaPIG-65!FmI zdi{vr`_`_A)6k@Rg6Y?Vd%3FgLR@Q)V^q#(+3mM%E_gstHtN0-47N$<)v@}JMEkbO z(IL+c{z_4Lc9VYe4UsVYcIToI728}`hF`x9I<$Tz@>fA$jeHNOA)yDu*z<*v(hld} zm8#cI)6;(wcXHg__H{X*Mp%otnszB&3@#d)x=&fLZVcM%Ke_ouB1c#paXaWn(E7}O zBWd~bbm*1r|3Nh(ImTUn;?J^1M>pYEkuy`o7ktl;KTc2t8*=CF-5ew`4ipsL2C6W$ zbp4A%95Hpe|CQoFoag_jc<|)>&bhxA9Xh&`q9UC+!otG6DHRTjI0wq5cIa$J8UhNn zY8lJ>pPN3M3~c_V*%PwrK|xRLe^HPpQwnh97Ua>#nz}qX&Pf0b5a8O@sqj!Zjt6-_ zb(^3M#%PfK1sXH7Y~Ua`^8M$!pmQf=l>g5sIuD^YA0=`Be59*;bn!nrLMQHnLaRU5 zwU@M`%73o&v*mSk2h9kW25+1_%&9Lwu9jrhS+g-^8Ip5c>>Q$+N^WXHQC9LH zh}59m(wbRzw(t#{!10Z(5%PHI=Zo}ppZS#gVEmwiUE-yMaR*I8o3So`Ch&hDvm=vlX6?I?w(Hh( zZ=<1PM)V36@1Q|QTK{ziAmIb5VrAtx^+YX@Wi@coD|2}J-}m>%#tif(z9Vco>tm1N z^lSn5aVyl4^`I2*fmax}P4(sB{@ATv)IZ!vacU58rDCD~h(~(o+ihxC@9w8=x(@MN9?*Gwzkq@>J(ojz+j-4LVS#4Pl1|>)zjY>cjq$PrB~@0*>mR7LrtMl z|EauTWMl-H?xK@UycSWC_pXJ0`~D4Zsp{f~tCZ|6TwYoTc5{p7-p$akH&488H^tX( zsOux$z#Iq0v9#10cs0ClJI=H9G6N_{mz-;U`RA#4x|*8$ODx4f0e^Y9>hQ-8x0*E~ z?=?o8tfSe5*)O|*Y)0>LD=#f50PFZXA86U0M40$iijkbdvqUz+xE0?ISO?NZ5tLlh zsD>u*PrX~W{aV^6iKTs^8f`QixZ~)Km3u3;pOI7cc0z(lEos@Nz%K&>e0_l#m1qXI zKXgGQMj0PfgRQ^6Ps-iM%A_RcYmVvE-EF)(ATCml&GCh}{E(e^cck>*HsCAKCmX0n zFrpU`WGm7(PZu~A&=bf3#tX1g6X%<20;YIEVNMn_u^>4&JFvK&sqzTErVnKo`LpeOkW} zyO~3aQ$jr|P|74%Q1kUp4tw-@A>rrtN>5zl+)PO~&}*WpdM)-_hUTrQMi?6zH3tz6 z-az}!20q@+3wD3Q|7I8S_n(}aZ`9sn#F{@Z-2S<{a_VKHz{QIT)!U=AD_`WcHfaMmP9wiz3sGJ?sWNpRO)sUwIoBAnnYQNXi4Z1H6N*VcOR(Q z04BSvu&$(72`50D^uauy5F7+PAOa2sPYHYy9&B(xflpgXLPA>1$~$40RC3-`x6#La z!0-L*yCvyWUxS$SHMY`X#r8wShzW*9fwU7C49&L#zF&f1KUe|YF*$xUsPX^FvP(Ho 
zKZ5TE>I{W3DwKTxpIfeh^;b@^e_jsmOjiOfVx%X<1YO$X;T@Th>ZXvc(9H(=zZyp)wv~@ zE7)e6G7<_rHWx0?@*N>fwJfkRmG-I5_FOG#z|W;S>xD)61I?ltFwke0z>IA+0yb`{ z4`w*wN8@^$_sJi$su_n2^Z{lJBL7l#{|urx>%|)>FjiwBE%@3+m|p=XFLFnvm%uZ2 zvqiL%W&9eow0t$8K)x-?upuB;{L&_Ih2ekNrD;+=x@3%z~Xj*O?I2Dr+iaeRJ9UA-9F391; z??(Zf@oBmY6n6tM zgWuEjZ*wR~UrSZq;~MhgRaX8C%EcrY*mS9V-sUACZuL3ekC*EpCa2y$kg`@zoFe}z zMx>*Ac{Au=(Zhc)K=`-N;By+C#h>ecjXyfueSXZZ(JKu&L_STnY>mZN0vS8s?KT(C z+CaELC&C3V!X=m1E!Q%|^jZg#w$Q?eJ@X!!Iz|n*UeAmwvwU~SKQ9zEc0oRev<2?g z^Q7m)x$sP-z^(ZwBp!xqCR@7~t0#Uy_E+lv6FTYHi`5->?BSO)wW|G={#Z?0NVou; z>lktVV0(`EnK|VB&tk7>u(Iwt9N+A>r1k)3KOZhAGmi;ee#x2_8fsq0S7R5mM{xiY?XP_BRF*0ib$D10r(q-g4x_4vU60m|6T) z&m0OVdc@es63!xQ>e&57jW|Z&?$QsHw}RO`swr*Qvd}aP3SzAp6C0D=q~f63;@#Cv zWC&1Vq;aJKxjh)4Zjyu=sIR*b!Nf&7O9liFh8wT}xw3slDHTd}y5AHU3QH@7@4|ts zUq|(MyA4O(lr5tK9{C(ha6nPn``aAHj(G#7E5KK6ji79rh?9h)$?N}P0BU~kCuH)Na(^b;$&21Nn&nZUSVUV9tf$d#9-Q?!J8~VHtv1L zmF2qa5b{)p`+T8jYEAUT*OM0oZ0=Z)jiYk{tVG zk1(;OC5f?JtX~^A$ip1E(;%T9W2);@S(+O3!mSCdPj3dyNOm;0^VN(AFr8@u`#DgIn+_~jAq6ICS!*obxMNtfSqhXW zf|LnyDb*vx9gOV52rdXR$H&IMadnfk^>Uhp=-!f{z8GbKU@Kk9 z7Z#k+U0v_@9UvBg(qIV&nr8>P{E?9|b3 zAgU>52?;EI6?A{U7PPT&nY2Fmt8rB@3R?S!22d4q4?isGvuMp=_bihxu~EI4ABQoXxZp%yUNsxnU@aXWgA zf)EqeG2~7=z(zY&wzpRq02pr)u6jO58M*6)M{8+m^;r3hq3f4`Tj|Eygj#{wccsQk z3>3#&d#R+ufr<+5t-f-%SEow$>(^RxX=!!*qT4t?&wgR@>fbOR9bKx(e{b=KCUr;P z$#-u4V+Ma2$}Wlh^NH@I)PH+>`M>z;0UmEJbZc2r`8AO;wG;Tz&x`)nNAdGNPJXzn z9%s7Gz>>LmaKE#QxcG@PP9;_7fPPX{)gc4n1^u$73*0jTp7}?PJul+a3Jd!YEO2eP zV;Qwac{irJU-R0~(y6V@AjQ8FuxaEF6-n$^($MZtzuzSdpy&Q-yY;V4xPdzKUn{c& z^k19x_&7i!@#o5l! 
z{O`_|PV2(|(t`W_&7i>l2fK0kbm`|R`;mP^RaT3=;Bh_v^|%Go;BmuU0fqncv{X`F z(U$-J?x|D3Q;W76-oNjecJx1kOQ)gm8JkmKpS)N6AIn&Q#M=JPGMY(ipf#gY@hDi$ zZKzzWSe4dk9{cYeJ^G57J^Vadcx0E#i#(I5M;=uK_7c>p8OdvjLQ_|YRxh6{=zXjm@Roa_xGVgzMmWcqDs0oD%F z=!f3}t#=lPj0QLmkt^P6YA<{Vm|Q=26CCVE_`>7R%WIjiqOAHGK#|$0Sk(zR>Q=qn zK(MCKErA_fw2v&djRVnC+C-glXP5WRSKeD`z7s)vvT@4`zo(1F9T0@gqLPY$ln!}C zL#2;E*lPf>0s=^Ke3eJChch+k&>v@x%Td?|v+{j)>h#-<&m^9&^6~B^NTd#x)YR)u z-fZd~t?@E3H)mUnrFBox@!u9Tegr6m_D_C8i5?sPS~3IJcg2#OO9kN0`G7UE7GGtF zZqV? zfX0JvaNz5U4IAYkKLJEU>USpJY*b`W?{=g(IJYN?0zG8=l6FhDA2y{F=yVjk*+6YL zfe_+B+PWz$Ev;nE=CK}NN_z!FEO@I&vSa2@=;>BOdwH?E-O&cO&rc75Q}or=8GGS? z&fY3}nK+DaXRGsFEVKlnkts}s9wDQqPOH0ob z;7L9JyVQcxw3`7Cp+p?a*3#5PpVn6_h~ilH=~VJ7GAT-I`{9_Uf(?w((rVjSlI(0P zhe$j1OuvJm%^X1Z>)k>ibOBB3&4@b6`?fYMrIk-)aIk;s`&cPh`f){w#-5##mH`EK zNV1euRs?me4D`HG)H%5utk&MP4IiF+;LS=Kn+Blm8nDr=apuTD znm(#ab9m5LaOapaHnX9pr)OhuDn{_i`Rn?MlpL@c7p_jZ4}5xZpc&6}&Z9K1URsSO zbgdv{C!0qIs8s=aC3M3!CL7vg*s$`&CLuo6cv1e=ty^6gS~()!KN z!lDp7F(4ZvAo?vR}%I!kq?9n)x(=x^%LlfB9K)6dJUNF1J6fMDFVa zO4j0)0cdBd#}hB){XWqFbn=p*pyp`-9oiu#aJc;#d8fsUv!cWcB?m{z4}?^jhTHy_ z^2K#!ss`+c#{iMPJZMguf-i`qTP-KhsIATnW&Y@k+>OGT7ba@M-0T&C{#w zG}g0Vj-&MI|^8lpHgCG1bl^<}`9TPOV&Ry{S z{WU()(!J2D;F+cM}z=jF(80F*kl7WHZaK4d}|G;%)om?kuC$vD~mLjC}vgu zrPudz`nA9Q()0|x+vb>PFy-URbtO=dd#>`crBy%wsYQ5WfTU!~z)Q123)GZ(;xWWP z%_fZxCQSXs@1TNn;?tA;B~~eLz=Glo$S;VyJ}Lu7`pUUv64NbcdeE96*fvNjhH7fk z6o2`^`&I!L{wRugL-Q{)(dzVK1Pbu1JAz}Fwe}mLK!L5&t#qRX53z_C za2lz?8+(k;$GM|f&YrymO_p}?HMTJ}Gc>%Qsim!7MZ28oCRl zE^mY7#*NME4N7j#mByVn5dh?p1e>r3ZSgweIVM>%yYHi?LP{Iu^$7vB+@X_?$kPHU z20&RlC?P?5Qdm&1*C|aN2MnEZWlpJQWkLmAa{z^&@6PVaQadiXcOUo+PVHd)fEfYk zYXDQp=>|R#=t8Q;aOYyN{t;lQL3+0uf)lLs{gy3-p;Rw#GGh}EWpn6AmxN>P01~#+ z@^T3>U~w@7q>v$7>n<*7h_W*9+-lCiCH}JO-`zoDdKSgH$}Rs&eWh~#jw`MyVUo6@ zgOoEuLdH5xOilCZsNONJnRxoGLO&V-%^_23j64nD1~mKq*)fWFkq>n@TU5vft(uw> z@<8UW7$%@=xV@|-6|(mje;hMxVA$Xzws$|%I(XB>YV&Q;#sI4gO#^!qsFX%%d~Z-7 zEAjJvl-}z9(2}}+3rJR>-(9iu{v7)`d5Zv!C-?eqe)kmlWeh+wrIy-AN{Ejy4kVB& 
zh+B(rczTc)(9ksX_phM@b!M1=iJ(Hk#ng^JJs&WWU?$~<8V#@$6cg*=Wmd@os@)4X zhji1Cs}K_l3yZ~iVbeV`un}@w*)+0g*c6@nkeF)(?~Mm~p1?hE`~BKj>M^NXSkyv( zW#w*A&m@~hwR{#lB=Ep8BsW(&;bdxQ<&%J4QB9ap0)R?F9|paz2JYS+I^L~j^5HMc@=%L@2<5E^)PTfpmw{uU< zD?O?r0(slS!Y~VHC>j7k zUuR5xO@C=e@7JTJ`8Dm_xr!<}QE{Dfy(#YX1t92Bvy-No!>{Wvy_@`+10X5M!U~%Q zn5c2x79RPa)swuAHbWzwPAAZxegrkPFf7c|O>T=daM_K6LQi=w4q}S{qym(EYFDz3 zOD|VFg3|JQdNnoL@#%@BpPwCeT6#AEqPNYT6`jv*2)=SK~G+X9takv`rS#bS^~>6b@{l0LTk zEf2DALtt;;W`n?d5h1QjKqf&8eIX--su9s34^YtdjCThtgDfFraB6mDeV0%3jTftM zokkUqWwy%A$#MKS2vnNAlYr*{F3Jp$=Em6J^hLQ8ss^_33U*wFvm!%c^fyQa9aW-i z;r_;QWbTal?Hx623;a8V;Dc|z-2{H_xPjl@Y|5x-_vBG;Y7rnRg>-Mjqx1Q2fTR~` z*Wyd#hK0tb)ZH{Uw?c-H59j8>U(9K1>-Uz9)fwM4Hgwtj5VSQE)UdWDEZsI-O?V&1 zB0K8>P*0GI(bRnPJ%(BW;_>Y3&*+`zSyVT>o-w0|$sqFMs=yc-)QkDmXJe;M|9-Ta zO)cU?W#w+d>MPol2L@&R^IvM%4E8L~-HF)_Y`#fRKTxr&1Z=O{0ciy#K&n6`^chMv z8x9u8gm>@q0kxI06^1C^zF6_F!NZGUE;w@^{86&c9{48Fg~DzCC(P!{eJO3IK(Qy5HAj_FxHPL z{$qaB2CZ8w^X<|$&J>4r`;jUTv}HMD1X@&ycBD&dfW1;|4tNqM2osI46j*w|!cqTG zx0zWn1JF5FU4H;A@(h>BuT4c|&hbjAp}>vqRi=Yd);?`@vu8Tu{400_d(?5s07;jGp>F686CcDcXqo=O)2 z;rOSWm*x3UISS^At2=KeS~Q}+{^xsHPvZW6jqdzcz4yPdkN+|}_LHk5oEAUvkmn=T zcFEA3RzyST5Gd~XKluE&z|{V6i(%izoHc7Be=-0u-DZo5hq{&JI#$7(x1zYbiuOX9 zD3lo1gp`ybXitwtd$l{3yTs!^e|q?I1D}<89uv3Xo$TVk>Sa*v00&{2W)HBZgkbfm z*G#Mf7S>9d0|*HAAs$+7Ko>}?X4#rq@%KteIiNUoL+7kCd&EQh)5L1$ zZ?K$|#W!tJNJ655-BB`Q`9LU>rf=^uFzYn={1r6kLv=KI$YJgWNLh5qVEVqic*7n< zrn{#2acPuqbAJA{W15=E4}yQ7 zn&NL7X&&`VXHHaDXP4m5<>zQArjF#VXto|jA7jWS^2|Nj5v=Q%FKp81Z(Xd8pO0Q5 zNuDpzNnp`^K(AUhqgV#vl@*F5*uAzzmM*!q>Do)WXMpJt7W-}YSj{Z24v*w7s5oBgUShyLW)zVgKitHm(r zfzt6nlz(WA4sU(1=b>t|yaJENOnc~rh(3BvZidBLkY6y%wh;Nn)bqY$O3wJz)}gbS zsK((c{CHK1KZ*&(Ap}dEa%2m4wOk`Y_-lDda)lcsS|0S(z4BYN$2#o72Oy5=h1)kZ zYjX1n;>NlJa*Xr$BG2sZK~?Sv-vg$j&?4|8Z2I`%v4ea~_U(k~nd30}Ip$8gc6UwX z3}ZHrDu=iS=2fGmh2%Ui7ObixTD%|KbhC+7h9)B>*|Ve}LUqFUE5~(+6Cyy*aOYfa{?(XuC85^SLz_b)y+!FI&V zcg6jMzLsXB<2x`9FsW8&hqU&+Kl#Q2Dkhl?Dl}SDE@hh=k^N?b$oOwf1x~#eg~W@7 
zFToPy3!UEPNj&JY2+7IGIXEW2wG9X_X;l4U_47BP>L96n0Q$->a$7!?3(s{~d4HW< zVfN}SIYeoFNpfPKZj75Iic~J5t*^ZMoR~3U12#Yl(1cu$(k25yEZMhZDJFflr!@$ZPx4gX`Xj-U|$e(SV?5g1DV3lk-sHIaZpJdDPNFDCnCQH1l zzoHNa;h1QFPOq_P>AYM@I`5Y&s{hmg|J-$+&vpJy?gRGzlbtWp{X%Z`%Q@I7#4r}r z_uSUXV(FF@-pex_<(h%=!H|$eQ*k`2NemfU?+x}DRfDQhG{u+^CHz-^v z2wL4^S~GCl*8gU6Jj-SK9apx@w_Bej<-?uYE0&4tt*Lz7$O zCAST`5?$ffN6MQ8me1%24JMvc>~>c`97Wk(dYmP0tbgFEQ>`n1xC+)c>R^=9;6zST zW^x9#@U(#MW6OK&hJTIo?Z41k&d8s{HIuk*?#Yp;SV+9<&!An!mjNl1*y(ggT5D%X zXG++8YCn^p{#8HPBp~;J{xz{7&k?;;F>}v$U+$4ih@l6Yj-{9;H;lg;=USvFCeV$m zX4u_tXj`D&VYvTfRBu6VVcZX2JrRY@h_^!;imU-tlTfjZphqtXkU2jZRf9bRyB+ba z5ytqNRx3*IF4Apj73u^&%*yA6xRk!_3rkVxg}|Bb`w*5LZ}-q*M?5f8^ng0VW%fri zL*1}mW7(J7pE*Ca0Q-1dz)gkmO1MM8l zLFA<8J+8!Kp*=yV=5HVJ3Wske)S@p)muFzoKV)7;`DdbMOD(75Tp5;t}P8uB3ARVEZ4ms{huXN*#zUF z*6$T$J}f|GMm){H-kvOT4qT2$>xh^xZm+1QmNT-zhO~5}f%l*v)$)MLvF<--y7H9H zvb4*AcW2il3FUVjHnGvzUY2H;*fl3$e94=_vLXQ4<@) z!<~w=fL=YYy);0aM@$mU_hlqE>zEPlRif7;t@#U8<5zN76qyp99-SKh0H5ZIW6=_c zh}L~}{EmgK5dZmd5^sCj1<9@+P2+Y=$8he5=!rC3*Y+uedM}yO13g{)n$9RbEXj|D zUP#RmE|A*9b=@F4dA>gOhBt`ckaIHCYA=hR&t;(9qx()))5d)`2SC|l>vMHJ-&oA@ zk;skw-Dt%c3!7aOSKXIvJizg2WpCuoZ*%;b1%d_6163s;6P?emomf~uDz{h_xLvxk zNMqB==#;x{lUjFYNU3~txsD(NcDQ!mqr1yNecoK)a*04a`LQfep!P z<0k43{i}t9guL8VE2Q1GColWhPOThzP+O$l^J1GdQw8nIV-n+nI!#>TNp9gOn~7dA zsovM*#h-64_v)bVJFYU()?;^2tcJpm@5`Lnb20zCn^vU5IO3niQf5DrC?f1V^KjaG zY4G|;6`{~qqJI}U%NJ{G*Qa@@VA@xXm#F9__!)Ak+wKQs7_97TAJc^IDxKusWdRX_ zC)6W^tq`fRI}@iL?%K;=JrX|w2y<$tJyRsC!gZ7A z-Cc4vn^spZGA*feAYkr&X_v$OHNupB@A6XtzNLHqmAEeRQ-}184+2q`r|arlctOFP zbmr*8mv8XyyKADU&-?D6w3P|LvR5i?8vb-9v-@D)1CED}8*i0=X`T?7I~eqdDgEOk z3cK%og-s>KRRIYC)BQciez1+5Q50sGI6jDO3|0W}vdY~LCn@3^yMB7({QeC5{O7dR zZfSB0p5Kw_HZn;(x98HH$Tra#l&im%1noR#eej;}V}oleA9Tz=PhbD!(GC>^o>p8n z5eF4D+6R2GXR9}xXN}IT5LxTi>m>(_rD?&}_#ge|2Brl$M+R`^V{7IKpUH7EdyK}x z6j24^JdgdO$L^Sr%EI_Y4F7tZH*{?9*p6t;XdF3!ZN=QPCiShKbajA+z+&MR$;XvP zwY5A zNq+5P^O1{R#vXd`6$(1=5Pa2hlfy5BtcGVHtABq@EnwVnd 
zo#c(PlyWZ!CLM2Xf%F@+JVujxaUEThPyToDAH{Y8<}SdGY}Xl(@1K+mvQ3FBJU)1a zJ;6;LyUS29{NbL5GMn$tw={d>+wq*Rw#%sm zPPEupujuP(gulX_epp%*uT zSgx`&M~q30CsV7)iTj^i`$`%kR7*GMdntu@)nM+VzpuuO3bpeL5oA`wy?j^SG^$7g zz{%lUc>l~y0j+XLidwO|0E&NU0)@zp!UHqN4JrI^gZ4Ky{>nt2c883P0b%hb!|dW< z64S9eEsYZ2W@y#l010;7+rbus@*Z}n$FbqoSB#mJcDh!@gy_bPG^4bQ^cw5cR##T=cW@~F@oj9H~ zhwPPi9=RGqP{8n!dXS+bnMeB1Md2Sk&}jb3JB2)mAFWd9x0Q-rAbh?gFwZBj@2tmt z&<9Nm?n^m?y2c-s)>?eyD9aPTyOB2g2c`4Kq~t?&8!3l+p215L2*u!@qBbISqB(24F|TwM2~qJ<6wa%$QYjR--T8tdEy(m6s2$M!Mj>Y zlaMSPs?&Slph`2BhY-V|%-44!=;;_yx3smZ8MBAU>5R=a10M6%Lul>@f->ZJ2p<}O zMCNTqODPPgXOeNtDhqGK&lVzcZUQYQ8sNja7|WheYE|cn7?B0Q8~ikGdr?f!u*KA^ zW2pNkKH!1#riP7%th`kGckef6ZZeU%o90OXDWpM!e;~T32CQTcgOa~$d3vg8W-v}P zK}l9SaE;lqqgYo+@KTJ=V*ydr&^K`+rSD|Ih}C|&$reONd~sEW716A;Bwbbh?cd^f zoAP(=yY#)WXP4{BexiGPqr2jbaaqGHR%}4@VM=XGGEy(f=Ota&Us;}Y?QdI8$EdJ{ z)B{-Socew`c>1EawyAn@!GJ@;Vq4&0Cro?09oO#@MUH&wJJn_b3~Te7s3IA754HRdFo8AKuUm@UJZ(B!c#)@XxPJC zD{lTR2ov>#Dz<`KN9+-|#|oUG_Z?ecB<JcBQa;6N;qf&d8RQeKsxmySxtF%aWrOL{lLUZ*C+|%FanAAek(ZmwrHJ{QNWwNmsLrLNjb;ees=GLQe?6=_kW6` zAzzJe>q>RePnr`SimZWr0_T{`o4n%aN5;SII&Qqa6_j!KK}rBArl1yw$Q3Hd4{)Q- zst#l6s_Qv-QW~An(nh4KEb8@cbVsh(fx@8ot2LxrDZFU0R@eVV+E+(K*+uW7C>Vr@ zB7%UDDlOdxozh*BL)Xv)C{j|=4GIHD_fSK3%Me3%4_!lZUq0jee(T;p?p=2luH{-V z?|aVK`|R`Vy`Q~L(%t}Dsmm%})B~j_^HKji5Mx?r(^V?;BIoU8EuRdex`G5=GsLzB z-tK6C=H z*sr}j*YZx0eo$~A{nR6R;@d{<35)KzSn}vMOALSOGoYqzrh?xNXu@orJqTB>wHftE z>l%2{mD45_A&yIx)~Gb*8*y+L?(%J7GD<;hHDa zl>l0;0i9UepBSM%v4|=c{giL%c$dRGA-_k;vE}!JynNYQ@>S>kXe7fO3joAgteIDB z+{*nDBa=iFajpKvOfFopy~yF1$Ertb3Aq-R;IKI)z%jqW{DF+bOYs)4aHb1Py%O({hgQLEJMV>2 z8^dMqf^Q^IYjdwzFvb_trf&RFt;8Umg!v)!I;nwfqn4Lby4jyB{e})s$x1;xA!>eD zGY{SX%0*HMsQWBk+~_s7ve`h(mqP2o7VmyNi|)H*-FHxm8+b20 zz||;PHOFMhe5A%(0&&fs*dqdXOc5!QT)$sn4T0QGvMiL>;jZ#gsoVKSU&~v+HV*af zn|$4vCFjUEtM)tMx;>-0CB8;&z-G<71=xz8;^{O}NbSVjtdvMDQ@+O=14EU%nn3sZ zvNz+u0>0djWs*@tNBR=s(BQ!G9K~r)$sX}9z_ygV2}t`}Sk+!Ud(D0d0?T^2vERge zR#TVLneX@5(W@x%0K-5nrSm&d0tc5 
zckl#^DVl>(r7Opu91H@`ID7RVXMNQ_l6$D)V<|VqYM6a;l@M!U6o{g3H=xlfNZuALd z^6wL|zJE%09Z6&2awSz5l%s_6#WLL}VBlXV_lyXe)-3Ajs2G|4opI6RfpsXG=q z9hSZ_Ey@I)<7kg>!4v&V&o-HFy{KTkr^OF5MtY5tE<`^xZURWT~kkqVCyAi;FNSJd;ajm zeuw1{Lv}JRYr1TSDxI3zOb?TTyK*Lm#%pUiVrwDOlizL2C-W&TRw%*iBSpP-82(~0 z2Xce?n9-aYIO5?5P3RA06*f!V8m_-XF6gj-$?l<+N}YA|y7JJHNeR6t7YHaZ387Od z3Bcd_DZ@3Li_}TT&GE&JpayRZYrD9hlvI|^TK`8nq!uW{v4H^BXb%18Pcu^Y=!rWo z5U)BI4d;4*GZJ|OMlhj}rg-e;)k|?gHjsI@l%(4W=lawDcs~!hAx@Mb+mAr+Q zuPbt|!oB#ka{${Isw9)Z9`!73c$W$QwJXC#b*k#>amB^O4znVU%qA@7sU`XZE)(tS zW1b5zYI|ejB1TxtJGRiMIFppybyFLMD`9>VQAX-OYihc|%ZUr1d`2ZEmtoT8JoqYI z>*3UP#2euNP*??K4-g|jj6-nv*;A|)^vGiuoysN1i!x9OjfA9iO721=H4QcbJO*0D z9diU|ZzQE2&C!;cuY%IB2M7Mi4eRbh+|Aa{t;S(EP5xlwU|EALCwH!Y62@{mUoBe912iGeRMtbE}I zb2*PEmIUhVpJzRLDN#kAPn_M|Eq!j#I1O-^p-Vk%;OTR;S?_viRCF>?GBcxqOts^d zf~~wn-eTMCsL9?zXIH`CbMxt1@9Qcvu?65tEui!c&;}Bri}I^0sIYL!q2E<;hTh=| z?lVRcVNlNJn-8#qM2CLN4V;#%HVnU$m->Bo&CG}$5*s*b$sq+1Sp_hSU#G+2AY6j} z>a>l}Y6>b4Y~1Us`<7r`rCuIFmT8jWqLNTIl?%{N{df1cR)W}EUkV6l2LuGHAqxtI z7xM~A;DYAExx+JWFr&iaUPzD#ey9LAr=?cf$yhkV-=GVIF8t(W=BgYxrQih4+1)FK z3_v5jy7P*JW6hXRw^j>aeJT~`)Ko;>@%n)pYRQAeWeDmjK4wDMtE(>doYMNr|FUD> zI174q*2-0Bq{1DSA9XHr4rPc10u~f-8_TAlIXw;D*c{-_2f&TG1I3o0!(5)RN_#)* zA$6UYv$Np%lf$|zZY)3|rRQ=A0IFH8XqrR}N5JbZw0&-3FFL9&(qMk{0yv)@WmW@{ zz)fdJ1dS9>s5_V`XDt)EjZZ^Ew>qS%6K09@xGRfc`rJu7BHt6Pdp~&-+_9=rrs=~~;vt_<&x$bIE#;I$EY?)9=IwGE~4FI8ZQ~HKFivo$K5h6O)_w zF=Ry{vH*|USL$xvu20tq?#6c)X6De}upi@y=?Z2Z>338ewK>Y~eODJ^wWpcRJ0( zu`!Q&pt_U1M`w26hCa^lTpa~BSdcA5# zH>ejT0+_((Z^U{awxA%YU&N1kCk~eo)ri(~)5AL{40U;_=SBp1b?h~7tA~|eo_^}p zxEzi4AI5#kdnCh);a_wGhM|v@(^gKj`?XzCJP4coe=kvLCw)5l(W%1#&28M_)_8SY zdJ{GL4%0$f4ECqM3g3HIt`n7-MG@0QdiJ8q*m7VnS0>)kqfF?pkU!T{w`YD|rhZP9 zH(P9)bHRLtLoEtVuCYJ4`aV@p0&$i5>+lTb9{^UWgjeq_v3~jG6wD~?jA<2E)x9ce zSeu4h#Edywf2Yk;Aj!qW!1caDX~f4(eFzSq)EP}_?IbF7T;uW3iaF$ufc&8?sOxJ< ziwLOgi|lQYe4WtK!cBLPmwS>q#=L2(g2{#acDNX(HUM;bigg0sdrsB!Gqo&gwzxLN zX=tvz?#rJCncj+6J{ru^HnesbO_Qp+;G`Ads(qgU(WOvAULzaKH@Umf$#<{c5#!g4 
zFadjzVUnpF9E&>i4!QE|zsz;I>ioJZ!>jcN!rFX;lP?uaL@$x280Y{B5i3zSDs^3= zO}a!0x?VExQH|kw`Mv{y6$9a_o0y zlB{Jx-+>vT;z5tbtm0~e-NvZRYXK}U!uO`80btJOUtc|Ir+hr&xt_?O%8hEg8~bN` zz}EHv;K@c?0@c_rV}&6LS&Txs1wyKJ!f%}0JEp66mP#~TzWPi}_EY>Tu-?39j>6eX zdAWn0L@uPc!Gqn@Wt-7i#|8Nog`B_DWzE$(Oz!f`^2^jQT*0pk8pXB?1WJ-M4rO(& z>>ZAAE+&RognW*11`{uR(HlaC@Ifb2r$xw#j5pvOUwyn3$5Hk*Bu+-*)S$iMB(s>w zx2ueb9{>YmaHaWW6$XacI_HJ^BW%fDD%$ZKi?-%KAOiQ_wMZ$zT`5&zI#`)W+|?DZ zPdf=+xx&a#?gB0(_uB#{tFlg(vTb@@Z`go|DSlo;n|1$hd(>Pbh|Jk(=y!1Adf3SV zbu=mU2cYKKl_)}N+S=J`cJv$T{cKUTwIu(ZQ?~`X;jlFk4{T zHlT8XJ8r^OrmFS4Y>jIUp_%dHU^Naf1jnGB$U2(@i#oD?*Iy&PH;oq~xurg9#ee9P-=1A9qOB_JTHLA*q{bnW>9J@2aGAJ#$H!v8sDC55 za+|3qSzjqLU+NCMk<4N0S&-fT`rEU5owm30vd$Ikm}5^*ceBuSN%VlC|H}8|zKWdK z7C`RCVC_r)6qAAj+f>z%eruq>fcW9B_1vD6NuD3tXbPv6aN{KEWXCP*u_6EjuLKC< z^+fgjno`Y){?PL(CZPD$rtMi2ky7(-zY1ff`qV6krj7<8G!D4R=LBi1UJ)`%9ud7P zIKd>~lHN`psSMz%rcXCrQJw%fkBP#yG9SJx3G^h|nUG_*94mG0y7n~OuBtjJ-q3W_ zRktE{>!(%x!TKDr=lHIQviTi=9KAf>fF!eu3tUKpP0Riry>I=HN?s57QfDLI#Zf;FTzzO$y zl1@WJ2hv2t_0|JwsA{DA(ixO_rPLk!qDvM_VXxl{o1iURgeXEwOqO`OSckphf+$(2roIOp}!Ip)|ez3Rt`P%T+AN7PobM{u!>M}09 zF8BrZC>}hbFj;8d`;ld}(xC7#o05;76mvdukxoit#~zt9@FPDz1IQvt!`&&T>rPKG z&X&sS^jp3}85$efb$q@(;nFtQ{DGAmvfh7!cB>J*SbkL3T+Bf3aikjbn9mnTrk@JT%J3dW<81*+-zZb*EBjcjshsdddUa=J{q zVtyTE^x5xrP%wDBzZMlXF{9xRhck8Hy@~&7sY%>gr+c8WF(sHaInG=LIUO7upM+WB zG>JPB=gFE|I~`jLZf-=3)hwP=OzT^{uP&D}L2PP?<@kGXnC~?AE3Y9dmWuOqQ*@P+W;#Qpis)I z6Yv@xuQF&=_&=8G;r^cMh9+;$I|5AuSDdooc)x$%T!X&wnr-r9n+?A$9nT+-BDBA! 
zSZe}My)%=nH3xMrw+{0e%iPQ`@@sRb%K0DUG6Ov{Z|1t?e9aCYqZS?BU7z-x*(a1p zNOQDK0cH|2Rtr%8CSis?l=wAix-z#@2igt6dEjcZCy$rJno zvl(xT_6!duy<1_}=xk?af30FCgS4Z)T@N+mwa^nQNP=LeK7`AiKUiq`&R+)&g*ZfqQYcn+ZKr zOR4>l<6mQf+UEIzn$+-^X&-=xUTC)X(p`|E$uFgAKJ+p!a`3?guK^YevLuTA_ zv6LNphK7b?zWQGJdV>N#NS8m)HWLRBSkQ}#iXPG(vYUfyeqNx&gbabkn@T=!C!FVl z`HyBi>G+^3>AH0uYogJJz=bugi@?B{MsGFdxAM!5nV0dQ83 zqxiqd5B;SifV%%tlGPH}=?3y>w1TdeFDF&6J{VuS)gIkr_UTr7R%!Zt*@kwB)~13m zl%_(@AOD-+o0#EeQC8l(FW%7oF>@QO1NMy*5bZB)ADVp8j&>Kg-r=N?;(S94G1V3l z9zfrCBz}Qz-Lrr&e{uT+M=7k!JuRd2qxO#T(O1@T_o?m^{QiJ+!eSffrJ5!@WV|w` z@b~&Vy|p0IH!ZXaL$mdIR!yE#=F7|R5`Z3M=seNunj-f5!icfk+uJa|_a4p{yU_mc zq9aKuS}TkrLlbe99hGlxZ5Vdyz=DLLyJ2A(x@18VOlSg(y*j0RErT!N0$atfUR-9em@G|HQtt+P&Y*A^sZ zBW&z23I9w%B79ME-l2z;Co=}y>l50EWhukHzW&_LKT)B&nhtn3vpNWiOIm!ktL}LY zGbt6QQtXXDx5*Sm4r%YJstz&hHb$jRc@X^K8}JL)u0t<(@>zi)7%J8a`FSDCZ- z5Og;ZN*Knuago3lyWACdeS@Mk8chyD72ROy_mLHn z_WlIRbRAQ=1i)H8#A}^$myH;Hh9G%ywJyV@ny+GaWZyAYg+-i{7+q{qi&Fnl4@9xj zaD5_JzLv9p+7P_1t|_cjr&YX*NXW$t8y1%U$hy#{{AFBma5X_V=b z`S;zYyNJ2xm(%GUi0d+{VhwJ5kL?G(X5_qvK2!*o6&{10TFWdKY;o?VlCrvBXf9^d12uWl!%N8djnp!>4H|t#C5h03Z8esaZ-QOENtM8nEs}3 zENX8f9;H8L!@PI*gGKx~f$rI8x`k#bKK-Unr~t6K=qn{jGl*_bPQ!%PmJ)gGPdwPg zb}-zkqLK)a{yVJmcpf_U#`8^1cy$shSIQL%(7-wXz6-5z{t>#-dvsY-X^V&(B!z#PxMMLVsdS1$8Rd9HEbU@n)FHQB84&)L(i$%~e$PQzF@=xW&~c`~iNWJdN!XDIEKbTl3;j;?~>RqStN zZrqs~?-)t&dvzL09|2$w#MNf(u^P*Z<3H?^JR}1bkxoj8%97WwA@3wuT>SeK;|oE$ zMgc`d=c7f9FX%|y!x3TuU-0ZqhCv6O4D}0#D{G>n$Rw%X_E9Z@cI6~^Lv&~A=}?1B z#L%<9Pcbom^CswxfWUr_lL8^>)MUd*f-g1C%diD`K>6Z_N;L=Kc4P;1OkA56-aP0X zABTefu1kBIo2zWBTV@8vd(FqQ^{r_*m2kQ6wEGfxSGqDf!OD^JXnFWv2&Xm~743n^ zUSeo`{`EgXCOGZ}vLZhMMuJG^CW6fe5+`x$GK4aa;&z6VJ#tqYho~|R3mE-uR7;tR zikPd_onuViV(arT+MIWPWGgI~lDYi3(!}hWhkFy>IIwYe0EMLHfp_4&79R5hl^bs> zrFCf?^3>5!Vu2fBmdLg3^rMlZ3nt{!UkgvU$)|IT9@bLn0#AvF5gw`DycILuACYJk zcO&p+1X;3p$v;am@pDS2%5lHYFf9U#;{3rT;I~*(S3gN^&GE2I=}Dd=IQj3I&efVM zi5xA5B3!_r8(qGi(2gu8tfV7Y3Y$^wlO(rYd`bhP3RPc826hiLpM-2i9F&-ZJ&%Dyj0{Z8Ym55! 
z*Gz+!jSB)7yl`<@S2Tm6DIBT7V&T9HbC6#fEpxB8NMtdi^|9<(! zu&3edLH)`#@+-96Q%HY0yW9o5d zUnsKR#PXStGuNH%(nSY~qrOIe*dAhr+N1iUuC@0{(UqHU`s^Lja*ap_*jl0WSG(4}uGKLr=fX?B}dO&UY1bBJUxkl&W>?lRO z=mU@wNT0Cl>H%B}ZNRm-*k2YZ1kl!7W#vqUtKO0JpXn)?Vm+jImAy;Kr$RN=&GebFTs9X_|5&zC8$tqnAOfy6o=3)gz0(btZ8tUa3mblTC~ zz7T$!uwpNyI-unPKhPZHU<>UEIqPHg2ryFTKie)KW^=>=I%4c^4ER|?_Bg8?I_7In zx0J(W7jR_iIJ`5bKr13#A9kNc3sgS~Aozm}{Kch+%5jpD9xe4sTqF17lJ59u*QiX; zOA^*siz^lm?B@tmKpn3c8X4{A3Y`$too;OTx6NlJwzggjF8bjS4WVJA9A*!I#Mm<0 z^N0~+6}0Mr8jB(qa4iNp5Ch%7Fu2_-eq5Zl1qKF)Cgq;}zqxj7AoSj;NKp?9hcVsB zp$}YN{WIR+*O&zSN*Q8k1ru|DRPnp7w1(qEzWNuMmA{azuG#5fR#|zqL;mUZ>UVU`$?F{)p~MhoqX42|DARQPv(`3Z>51i16K+u5x3 zwh_?B?x|H0@3WEEw)qAYoVVZap{GtUeeo%JKTFZFfOe@pE`IjO9VY`>(WGK{o_WEg zlN%Z|eqKJ^secmjFfx*&X`YJ*O+SA$<3yjp%?o;Yibm;t+X(x2X8&>Iegk+#O$&5g z@2lgv_FMRZ^A7NxB~9pI*$BIvQ#h5mxD~bMZJY~0t?28!=l9$!}-?hOmQs9iCURCy+GBTxTq+WW;u~v!_7xu!{fj7 zEvIRg`{LN^cz8gLD{SXSt1Rh(rb3U)r!UBWOj8p;)@vx#Z*qvvXwTe_qE}GUx|a@} zSUq-~0%MNytCGt7HA}}1DwgH#vRuSdZI}S^7Ql&f=A81P>j(>|C{TArX!CD=V5-n# z0HOplV-gY9o{toMq)R!w%gletA9}uo9N~0lG3NxjVca+x*$pi*Xp_}(-k~9PSx`NK z>i}IbB%VL`05rs4(1CxZOFkk50z2?2U$`OAOyqmYc_jRdDOaP+SD9XQcJG*2@Jx%` z^Q;dF&DcghW-HuK)cvF=OyVqEVPM9K6e7Oy?Hy9Oc<s z)+E{)_;+DB26R+O2L6$IT>`y?QC*>r9_`I|UqTv7`%NRK&o|K&mwo>2FMwT~L^qwC zPeH+&CFHaBpCc@ctz;{uMnbh@z`F~s`nPqBLN&+;8lw4Kxg6bvO z=6t(DU`KeNB+kPei){)t5om=Wm8_HUpI8x;BPXC9MIl=qO&G%)LDyN15K~W{bH9c1Mo-lLU0$fN8^k5@g~7RDCeJ*u zxB%uGN{}790!s__z}t->%)Hk1?=Az~iVa3T0&dx#DNtHAbo1zM#>HQM4W;eOkMbjS z92%7fk!&X0*tCnGoJ&q%x?SjD@;UYXU9YeR>_YI==KuJ13xd%$pJ%%5TQco9o*LkA z`n?b$k^2Glb!Okx5P(_Q>&>$QcxV@N1w2zm0dFU6H$nl!uF5EUh{wC9BntW$S`eUr zsfa@5UY|0C`*z#IM9=f5bMjqfyzU^6s>GzOF=Tlqr(D>;6=o8QpqPm41$@y$OB446 zFy?cuZ#VurFa8zgCSfuXJ}pK~H4M-F3GhVJ4<~4msLFjd!^F8>_ZSbxB%xaPush7<_F%L%WG=;y)#{wGYL2#le3 zWM7lbWo;w-WQQOEt55`;`gby6vQ4VNRe$!hNqZY^98Sf@1lU~#7f(uAPY_4lQ zTBjig7vP0OWC2dOyBPrbOj`LkFcuEmDboz@h-ilX57;K>apu4{RL1EMIy;tTp5HL< zl#WCe?VKkC)!K^Rzmx*u4@Lm8jM|csm`~X={}DWbk?1su!P%qC%=_2$fpm(v;l&JV 
zC+=HGX0L23Vt^qY<_dH_+9a28)WKR5dzX)-xxRL#U~g68TMp(GTwLTCF=~95318-Y zpA%OOr}`@w0^7D}6TeC)v!?S+z&C1HA;qR+r39gXVL*d<&=$-FSjXB)%N4Uj{0ENj z-dZgzT;;kRmw@YgwNd;eCSA`0uRk3Z^9{fk$V4$VMTWDFC z*~<%MDP<6&m{Syso|c>=gTUAIpU_|+`V+?aVXfsFY%8=f`J1^#GxVB8TzPfJ67%12 zgj)o1wycZ~HF<3UAiH<0V}V>-S=aZ4%5qp|OYK>-7T7iweZCh=cy z4{`t3{!KL386&##sh4HQVi$fu^8j^);#Qmng82EWznJE(t`YNqp8Bkb{Ocuc)E{~i z87Xtd;MrT?3c$+=HjZ=;_)N`}mf75=3Wv2OtYZi-?77THJ-!m_uEiObD5A(D zItQ#2BdorArlGq0;_o^j!lP6h&OSZYt?cgGyG!}xB}2xgkHE@ACT5FUCxG!vuSp^T zNSt! z{t+?DXEeEHG9VhDS(MbvNz~`M>tZzm&`~g=}`J*@(I8 z%iGVAIjaKM=HY4Ku-6j2VJd4?feIta4-B>lG6TokB``<$ovs|^e|L{2kwp;eH$RCw zD_P0(nX9e$25{Q?06#cP#q1;xw`QekzvoXV@Zb2W@K|zOuQ(8b5 zHm-ERXA>?6Kw!Z8=AEaa#fRE7Y%XB^d!isjEd})C)eTtne?0PH+&g61|5CDZ6lomR zmp>{f0Nh=%C{%k)ady%#Ci}e35mCl}97A{9r6^~&*TawBp_LSFyr zB<&=LA1$w}kno=86i6CYm!(2$NGA2%Xlyr`C+ zjw?pfsxt0$t2$+C8VKT7?Ao~=t{)dIQeMY*f8h<_gX~}Upz7bim%n(#9bXF~%P+f& zQs)?eC}No)d|#7d!WP=0WxJeWGyt_tI)O8ZNqe_q?yl-!4E~CcS7T$Ik}4k>XmnHS z1vGP!m)CLW+yxqq4c0jUSaITFa_fUStFb3b4A-=K1khOUOBPZ$6hzXUERdk8>w@c5 z;ITok<#8-5DV2B%uDNI>y^2}`t#5!{eBXh_krty0?EqA~R-fcF*>SuaN=XvD+bxu= zKU%!lQxq#KYkoAU!|SwN=w4!<)HRFTHyHm^GvXNjZ*-Lagsr1SyjY11gbma`+dTMf z`q z7n9T2GhB7&xb!Ny)NE7{Qnb=Dy?mRrM1eai2BE`s)2M?Tp}HfQXrrC&rn(P6A08zY z6-2olMeRY)_zHq1^mMaMRe~49N`9n(<>@@MDiZVG93ai^>QS+sI0g3;UD4>Ay|Tu# z*b59tZCKSs3gG!Q2vcM0qI+c z!AArp@4uzO{)sG5Pb5{X=$7N8^BkCuh=x2^ukAy?YEuQo(YIXc5rThJPu*+7eahnQ zcrok!d6=cXCXuftov&pfqj(TsiWI4Mr23Apa*v2F=+kNgiwsWaBWXgm9SdiEJ!ckv za-7freOI~X_8AASQzwAm4q*Du@@pBkB^j;eE_15woE%K6_vJOevKw|WAuA`5u~$&% z4=J&j)Q$-X!aFL?A+43d@vb7EDw00SkokhGAnmPGAXD$9RPd(0RAPr18cXzowwIl} zknwI^+a!06!OVe+|J<_=&H=N}mUaj@;LXx0Y!)Z}ZfDIk*C;dgp6uOBG_k%DE%dM} z=F{xXhm-T>oSf|nxXc&j=-Hn2nO%NwXEuASq)Z{AC5tBShoO8aJV7NuZ&Xebro2b}##u}L?oF#cj0UPdL z&T$7q=F8@eg1H6fjtY15XVG5M271?Z1}3-qOeY#ya2V=e!ACN%yY{`>0*zb?1q2xe z?q)pdnhzQ$XNI+fF7A9p1{$5zqkwfkDg407|B+pFJ_xBaFClfadXo*rX9=Ryn51Q0 zw~#{Jv8=YL59+NPA}I*q*Q{bZiK=kG773$0J3XqBkt;=XxD$J;ozPi1cY- 
zt13+>NB$)XA#FqChGL@GWuujn@lZ?j$a3NRQ^t-DGYof2`aOI=oMXB>)V@hb>Cj9M z;wFoc6h}A6)?k2x?vliYTKPK37@|;aqA$%OC4a{OP$_-&of8Nq;3E=elssWA#5K8W z7$sv(I$1*xB#NC|KS-N(3cx)WAi-d|JS?0i%II4z&1yrH#{c3Y)F=9RT5 z)PPx=G-O)9YTC$7^H!+y#1OH_Sa@9@9uO*gK<<(r{a<{w?OgmWLWMRd|@zse~k`CBh&=ay0-QOx>@hZg4@~u|? z%0j!klkA*lnkI&0J@NQ(gcYvxAmyQbXC5&YC+mPz_x62`nr7UH*x6^s&H-+4C6ZNW z#l$8DEJnJTDCJ>;b<}*Y*tVwpnPVh3EkTo1^ELrYK)6Y#Q6HZ`x)J8*Z+X8J%4u~E zhC|8ud7hKQwrL}g&i1Z!=toQ5+f?_lt+t&}_)}lSDfL}h6=c>On4Qn`GW24nn%xMo z9r$$^q@`1;v0M3l-%J#v@ZpJH3mT3mhROC#OQVM<-e>XY7?6OsU=1ws3sq~;%1(uB zwtBs=``Ch{cvC}U)HJNxlh^8eG1DC9^wRAM2fuCd)r&Yj!S^lEA!XMHMGc=;fyY}} zEcn3%`UWG>do_C8^BEg(I^&N>2cA99C^TfnONgBI9zGvR>B9&`=uA1KunYykfL!x@ zfoDj(M=CkpVQm12e-2TcSzf~P2L&c%u({EanuLkdoX{4>9GRi*Ai(-*&d{CX|w=zcb^M+Uw zs|A#ezY1zDQNPqelNhJ%HosY|n03cg^Ho#+o%ulj6>U2ZnL^pYQ^ut7&4*D=_cqOdMd`G@H1z>NK2PuaZ7azAsN!B^=t2x^Y z?{f(RENXHEOLT94a7&ziYAgMgHA1Pbx~qiOEMkqO$Q;Bo9x1Fa3bUj` z#)>!cNyud;JqsVfFC(Q%O+q$h$kb03$l>sL8Hv=R8V(bPS9|T6*&A@Y!hDm&UmDO3 zo6AjBV2`sG5z>_m)evRnH>4zQnk9y<-J9+1jf{O9Ns`svNU$FNa((ROEmi#?C4(WQ zcNHW#FcLm3AMJS@La@vCvTMA?G5pn^SPoL}&?ZjDd8g}0at+z_=QWLPuh$x9>0ZTY zp|q==UG(kk78NSyqQ+PN%j`$)t)${J2O=h|=AcaD#P`EBZuoPuotYJkreP=ImPN zCuK>_lM4sf{yt5(9UrR*PbJP+*L*j84I3o}xUsVmg*banh89_}?k*crsP?=V$j=nF z&?Krj8xk(gDG5Lx;OZNt|&>>*zxK%#eh8M9-v~#Hn?YUeV2JsJc;t655Rt4g##IJ*tF5 zPCwhm@ZI9SX^QbAjA;`^ZbPz7iu~sf) zc8x4(vPbFZq1_|yQgrXeew0SDK**o;$wFhcWW^HnI(CF7gCkIM9<*73 zG&`QebPy&|PuVn1Xa#&0r_CZH4wvrv>&e3bEAH>#`vn8+dxIxeKKlz7Pkj?SCN8l( zH^XD?YV?b6U@cmUNs})RS{t8ityfP$5e8rd*J7@KF}I5a_CC9kvwOM=2FNrBK*M@m)FlLe14cjs8d2nrzX!umRM-{ zilx4oRa++%4vK5erbi)D3Fj@`--eWU_a8ag&R^zOtv-GYo}g`7-gH%Z%(fDSm2@2| z31ZvH$lT)ivc(vSX39bmlPA&`Oc^Wa72nO|tT3>Xk#3NYVc_iEZ2q=6=UsDl(y}v{ zreCjUz@T5mkzMlOqjsdV*6f;l!l#f?nm*mB1Dk;g7Bv6rI^9iIuQ&?hHpCK$kSx=h zG&~+UUSz=MGzp=Li;EMcBoGaG(g5_46#;{XUUUg{v|EywTn(X|nS50%s+@YZPQ# zrLs;(wfhVkeRHI`Ha>U=YfI^!yrtrPW|lpzkVX7cgPC~6mP2d5M?#CIg4OQUBw~_X z+DRt%fumSO+)9u&yjz*PXXCXC%T9CcVNFcEMKsJ^hHZ{j2PXr$=*?7XXC7=2tM6BUN&ogSBdbcY*=PA zW|TP9 
zcivpNw&~*X6$xeH03TG^y)g1bm)RoHF1Ev?XQGt_lLu=%m*0Fl>smEknP-?0pncZqhVi^wNVb zLU}xuVRFGCt$OymRNTQheo?~C)Cf>zt%N)#vQnbMf6S!oBt)M{r9S%#%Rw^DI^(tu zMH}z_cYmD2NRgu-E5u@WWVU6p618V%`Ap~^;lr;traZDXt`fMX|6whfg>EEoc!zi- zOkhB(-QxxdZ+gGdx5)45RPb(Tp+y;xt+32d7W-*?&FR%&lR(hoD?O+9(@fo~sdyQB>@AD8JEfdjg}%0z%ab5x6K6jSwI@sJ4ZAQ#CzF8Z$UUmuP=&e4uLAL*ik}>7hcI6{#b)3?2VFJ&PUT3Ubx1z+}tW=OXnl=)fK)xC8yO; zdApi@Gcr$G9G-Dfx)%joKE5fhbEMB(z!Z2d_{|mX4KlAs)HiVz41Z|qBvGY%KHYTC zghHCrghg+B)MPu}Bkct*4DdpK8i<<^@s}}m$VN4rURGgj;dS2j!Ch%#V#T!?dSQ{AiPA0?8}+Dw=A;Bbip8aF7uGC`z)hEI^^iI* zr?tJ5=j|Mso(a^^7wv}A#+CmLkBgh%FT;(FsxQ9pEnUzvqIG0Nk-)yi8J#=bJ6GBF ze$UPd3;-_2T8Y(K64a!NCVYG;z0p@lPH-Hd<^Cb{5$YjlMY}QnlTs2sLl!)IB4`)q{goK*YkUe)a)X z?ypM2l5qjT5@SpEsJ;gaM-&CWvW&XC|9cQejTqQyYwQqQC(grm6{>$doeEVRfuWD~;|v*_XlZzF~RX{yDEz>bt%3MhDM(QMMOBL3MFh7l_Io>qx} zJSIgYuO|=jNvMNFfr=c-mg#n#H^ z4g6@rmsv7)h6p7r@bRva z+5+-V*B*1G*;&-us`xh+k9NB`;WhZrnn9AP9gJlHM3xF!_fK{OVm~TzMt33_iNn^2b99&%x{q=ziP=lg1 zmsY1ubJ=<9!RbSQ4j2D6)FC3LccY~Ok0AS1)%%%^hRY{evqz0RBmI^gcyfOPEa*q} z%p3T5gkN81k#N`EvMQq@F6&ws>B3a6{_vx^u3gnBgWw^b((jLKhRZ$tKfbZ)q5Om* zB7C7Dm%n5Ozpi2tXy0{%;!@+u;>hMx>vOvR)PdGvPyZ9f&E{FlHtm#hc4i>it?~7H zO^pL;Vb-ZY>6*O-&S`RmEYrJOM8!y9<*IZ@$pK6`w>Djch8~4 zv$tjce5$XAisJ^7iL%1+Hh{rUw(SAQagSzOC^GhdluuE`8-C+TowiMvHez&{y~|#V zl)sXgc1j&P#Xlpy%J6lRC*IK3@O$8jv)cVBbC`xF`_RBN#P9Yw9~^r5sl*hp+OWCXz4fTxwRW^h;;5%A{RqD0ME6Q-M5jd@?|! 
zWo+g!xc-2wd<5LS%E@J?;0fy5^Qfsc=xgzgEd2m`|I z9AePLn?0})%puq{*CLpZk`P9P3#V$dH*uT=qXBnO^ zfFhjr#2Q4pHQtn0r~$>zh$qV95u4scNf50;#;ATWT$u5`fyt(x2e=}B?tnBP3f28L z7YsUwsgPLYu{+OV;Ixv>Q>RdDYRvpxN9cAuYYRvsMPQXm#Ge4phg8k;VqCRUh#Cp4L(G=oIi>1%e^V1w5y z1L|a^K4snxAO%Fq^iyAY&s^5(JoJer3i+e2xv#Aj-h_E9sXa1(ERr+H~FmZ*ix zId{Lq4ROVEWmLY^-aRWNdNbHQzwkY88HAJ0qEAD`!GIZ=<2Z1>lTr7Fxbtx#1`g}G zNguoEa$uO=Vn*qsq^iuWxY}y_q$qKTwvjzy&RBbLZYw;+cT}eE0$S z?@!QOXlL+-Fl_anz4j_D@GGjn!GCZtjQJK`a1Kr`I{z|a4ml>MsRoWL`O*&y{0=t* zlv5TjjP2Nsl8pIo=Pulq?1-M3BP#HEM;|Y5{35UWnXhkf*%8W>j@sTYyDHqyiohqQ z&i%W(W>w9GXM)P;trSl+54!iQ{pu8t?HZBGO};mC_u7C*gs|m;js7%z7l~2ZTVqL2c>?Bx{R@?g zj|zoNUR5fy8Lzxb=Hj;qPV5R()D$)#5}N*9;Q4BK6T8Rn+rRbl0M(?*q92u+s#6gW z!6pfsv!<7pp3Jm9c;0r(xL2?~@F$}Of-_+V>lTCV8qS~dcYp8OyG$p|#b2i;a8fX% zZKsU}j@^UL0bR{dwTk^{&As%&db+f*`0EsBFi|YFn)Fhl|3U(-8a{1hyqGO~fO^!9 zmsecq%6%{UnDyxLlhd=JA;n)&$*(BZ0(w4tIGca(3#ebb)4cNyt$e0=$fJ*>)0dB` zyPid6y*USWm#2wiLN?&{<)trLNX4NTx)Zp3qRC&~Y=|v=+8>kU&e-;PnUtPgCW#F8uJ7V1fUBz4Ll7>n3BydozCNTfVWk@#`AQ&70arv_MqI z@W}Swp5${kR$Yv%l3}B?p!_`RqPCaXZr_NK-a)996^G62%;;8Iw&kbjBUw86X}=CM zehUO^^LG1-DTAH8;?z-I&9K^!>)_77df$#fWrs(zI6>55nb-AWq_cGYLQJa?>Ab<3 zCgAaTl)|pF-4b%$@kcLR;kk~?IREmJSB=yv> zjQQH=!e};hjuq#&C=|D}rWU0|81Iwfj4vC7mE8FD=Bei+^Pof3kdnzN>~EXG15$>ghCQ#=U~-&&eg3B1fHY zto)add|6WwK*ZP0v9Ae=6BZSDC5jSapHc*N>kkMgw0lc!$6lSZ*u~of?|v!-_w2ck z<*eFHzq9cfR{i8#=eeCLu`(K&ynCsz?*>DDVk&3g z*q6BVe0j@sZJ zk8afAjOLs)+kdT{S?Ahs$h;wDqJ`Tl2oI=@?RIaexzj~v3k2T$DWu6iJcF3JjV%_G zPvKS%aP~RgHhjSpjAY;1%3NXgX>DqtR-Evolmu(1n9oiVxd+)a%m>+@UTjd6M3)J9 z`J=f`n2v&9xcCDYXb&~5Yo>2*e5k6j9JRRZ~VCfXXf%AJ?M<0 zGIht^mMeNnau@v_o*IWXU3?E49Q$RSlAHTGyTeL~UW12n!U!o+M(MRT`MTHo5L z6Dr}48O3bS$e(z%`1oZn0)zBakl5|Gm318wgxQ?2T9GTh^rD@TTUwL69a};)bp3jY z$pGcF=F}MA^TAHy+Dt#c<-vB(EmOE~*GkoN;?C9BI%Pr1_fvYdHb3i&<@9g)P`>X+Fr!FQ%930%n;55Q%Hu`}e&TXunTq0-fem_|Fn zrcqiGlKx5m#uKK3Kc&HS69x0fT|au8VU1_<=&qpV}Y3M<)$ z)!k?zIUjYEPR<(NE)7J**PFDu(ddn?fWHwJPW<+T1;-!);Sqt@Uhm?uWVbA(bLV76 z`8OwvNx3Nd(Xo$B=GJ4e1_2S4qlJb(iOE?(b5^xN4F)Rwj4=e4rA!7F3?__|KeVcM 
zX6O@Ca#=?tCQRYJDV$MJ5nLg%KYP1OWlYs|0G+c_t~D~qG**`X0Smc1F)UtU{#s>< zU|PdlX0WoUOb+=%euY6tukyDEX_{3wb2pa^1`~OIFqXH(8}b}vcrGpaDf>S!xS7Wl zIA~J&yn?eP_>UY0w0y||erkCASy57TNai`F!Y9FuQmZQBP9g}AmAB?Al@kueRp1Od zpm>j%RqDaArK=Enx#byFZYxjMl>j#5*dP>yvu>he|8>&&4t7c6-J!PhgM%{Dn(2 znf;&=FN+89o-eD4%^xvviN1I9NQTa>8sDgKlS#eb%A8nZMYq^%;Jsjj?fXGST-x}W zXSq>Qd8QzB_*PHOO*h%?{%`&dr!a>PT17E04C-<^C~hs=;!|=(RFc@FwG0U>$W_id zivAnC6_DrQ*ZO5@ATwXaO7wiolrzDbKAt~=pLgHhZSko!Pi*QcY%X*z81oWa;bOW` zbMKOaV1@optyCA(tO&8%BhkL5j&>tF=ZRTL$$#mDSsJ=J1R4LVrVnd zmi#WNhlXJSvP4q#aNEACF|ROdW-M!yxs&t$ay7r# zs!{&gq$k$n@>C)_>RfIhbClU5^Dj*>o<(p($q(OA&@`SeD$(8JN$qi5ld#NvVQF^b zAnR~EE4e}$nx2%&NU zhCyt$ek3Sba7MQ?m6v#J%_X|@87?EZUR9+K2paeOT;`ldm};gZ z=d|)cF9~Sr+`pg8Q+@%HV=e>8;0&bGHN9HMj<_e)xZEnc_D;{b6>v{x#L(EfAS2_7 z)F079d4LyG&YwUC%f3oPK`DpPGVoQ|#iVf-&&|~e=H{wiTlUf%)vHsE&w7zM)lQ44 zUN;Tr-tpkQ`bR@l=n_5t_)=9iPI&U*HDh{S&G>tvW=Rv-wgMrM&-yM_u@8NH9W?rN zTnz5Pid5VhFv_v8A0Z2c2n-9m4q9oWYc{Y$+Uzwk@>u&}k<(-b{BX3Z<&`t^>v zT3rWW#5~V$H+fXNv)cFox5jw-G&~?(y3umsxBi_=O;x=Y8!cI7YT4s8<(#g_ZM9UC z&@m=kdccmkG64Nxnu0?v-wg#}3$mRpGiOlA#Y@IzfS(tUm>lrleg92sl;JV??4H!D zG1`4V4`5O1o0z;k#{H8j8(S)1Q-jk=pa|iso`soT3!y%n7{kcfcm<0had`bbL1<)W zIX6?JkW?i&Ly*zY)yjIfC2p9O9eCCJEP{Dqqrp(61Lp9@Snl36N_2XwO^Qthbt`4E zOwgyY<#S75DWRcJE^2nQG>}+-Yu-xnP0+W^1=-hs|6LR)Dk6X@A(02lkGMZlZ1l`b zCPrsMKp?J97eTKdKfmrC_sLxx9-LnITR%mw$FbVJE;~pujyR$WFf1Kn-X|;8s+#z} zxA5&v0{u>|?KBA?VYe^885MX=s(!H-2Y~$eg$38$^`hY%Wjg=HoODl;f8!y!KTMWO z;#<M5tK?g@i!cc=Rc{E&R{;~S`FpK=*;ge2W z$4yTmd$`5G1G2Hh?ewJh_+mgOeXujZ2xyHhNG-J3Sgzj0hRF&R4Gj%2v!-$kncz$& zpv^Mt%~+#Vxa4ZaYFzYLE^_3iN;rO=oz)QhVeL-nG_xDIg}2_K@4RK7u~(!YXE?i( z5a+k~>`Fj-`Dk&ql?$u0aZdBCO?ZV#l^(pI0f07U1Fq3E(v5$g@hByet6v){n7!8NkN<7#}*E0a)~pq2#zjIrYtwywXujtEem0Odk#0NnPwp zPVaS81XR_S!#(8QX7tg}K6NJZ_D=1Dmp*8rw7)#q4{2Kgy_Ulvl&K<58NZw6fb#mEsMJ94lG@ z!;N87UKnBOsOe2(86K}oI$VrfErB0xsS=qKx8sp3J{8ZOKQhDkK}`$sx?6|%xXBXO z(Uu?4sk=sjFom7$%oLbX5f&19$gBGL%Q$+4nai^A6D8}3?K>V`Ua8+d++|^9&bVD` zmmRW`szh6@Mv>cNx$<^*+dN}>d%Jxx3jF+S@?v+T 
z7AjXOF$C9<5_2_e`YU`1o%yht^g?{M@pXCmsMU}UoDCCe1sHTk1;?FlXK`x*npxte zBF4z!o%yxJD;mADNbK=abaOTwm1uDRX4Ho#>F5m}gd2KkKG86*b=S4BnwIU*Bel#{ z9#5+D5fRyrk-OUEq}EJJ`SI0kkMtrPuqu_utFoy|G`0?jF+;nSTze7KwLEdg*=*!)t*F z$@q8yKLoA@2&MNH_Vmn+w;afzopFdjrr!>`G6N}r?jUI+*U+HMAv6^V-x4gQrvg*>!^hK0jH-yX}rkojei z95SBe85tIev{i}HnRhK0FO3miq)MP+B&&m!>?4HVccaMDRH}pBrq32L3%j9>dTKRm(vRVgGN>>c z7_aq6Uk#9#;{NgVjX(RTLlTNU^Vh*6wfZw}E|=!4uir8iW(Cx;mdawNK5Zz zOoYP>1t@RdUY-i>)(o7VRXb*_E%9CK1N6yfrL0fy3c(19?+pYf44l%g0I&+URMZ)U z!26~haW^55?7+&OgMo0anutomjIa2BIX$feTjFGNk#EW_B-n7aY_){?;rkzxyu7^R zPoLkUNqQq5YG{;Dg+3SqMSj+HsUbV-2KnkPn3sU1RAOdk#YsB7#5Zq{fVY^Rk55Px z361doFQ*2&ybX8ijY(E}F1k(b(i7|xFsU7jiEs*Bp4J}2SqAz=5cxp)9zGl|tgpX= zqEvUyPhtRzi(1Weg$k%}I-TXpm2$`4G=OXzVo!2P>Qn@3D`F;@(q{Vx z=WYZ*rqN+Xp{_0G)&qz@IZIdj;m~vE&JDg`QY_w9W|w<%D+WmQKT|cv>5`uHPaI3D z1laWZ&BhGVeOYNtkCHm4ZqWl&;A&vJ|40_Iwz83x;{N*g7ljfU%N~_bKEMKr!9cDC zdV0CgOMGNzntFXzt0~$qxCn&7cov0CN zT3RwRoy=EZK`k4tFfRfs;HvcDAIVBnkd}?E-#&Dp;ajZDn_W+!8FOVn|0Nj!vH@!w z+4w+JT>t080x;t{*#Atmxtnb1*DGawQ(D@nZfaTsQR6bF3Lwf~rZGL9?#m4PYdH)H zkN3D%|I~Xk-Ybzd>XFyxh9Q}O<>#!kgYy%Z=HE})qg6ZaP`{>DhcNwc@F$96>!H_@I?VAcXqfA<=kVXW&C~t^lFPPsc2aijeo(<_ows*fKEY!kAw8Bb zvZ0B|SGb4Ww+}xNChAD=m>d1};>&N;xIsHTH({DaykR~~syvXiTcD-g-U?v|bFF_Q zkYG*=-5yZe{usrbs2KX~+RHA(sl^JDozZS*TbK`cp+u;{gej|rOD2|tTmRB zkMUn2Ebg{B#JQ=A4VKMW8{67`lBF!VAQ!CbtSnRdyKyU~rde{L^sV*p<|M!9xUpGR zoaP}Qc?iVsW%6FxbEThd&$$@R%IXU~D-*i%S0&?VwDJeZ4%S&vh7L?}hLS^1GSq0V*tvrj3nJ$gQoD0D`>f z(_8P#*M|1?`KmFYP_L%LWX7wHMgVhiaam=9o@2EsR@f@HUx{Xc7{-Q$O$7vLb3sBa z0657*?f!VcJ}_+wo0-xMh*IX1@f>_jaVj=k84dLL`YWD|*%dHirO`SkQL<6t;Y$nV zo}(a>HJa_MMIdaC^F+F__+H0^PJ2C1tv*wVhjhA>{gO^IBm{FtX(ODk#CuM49OYm3 zDmvQG!lGQt&8SSjDAuh0an2AK;Bo$BIa;22?B%S($VuPl*lx>g7c5$rHw!dU9PaTj(qj{|ne9gOq(Pp*osi3h@ceXx0 z|CcgDv6k)JG-;301KN&D&)X{0#F{4_>(@DmvO}u%U&YMD%JV^u-0Fo{tBK8vUYG)! 
z2!KDAXJKZ}4-Y@pyXQP=S`AZs5CkB(oBX%u|(k)8}^m_ zjim@$notfp6_@81?>TcuHHI zx(t#7kg(7>YjUnJEjuOpZuTbWb4a-!=a7oB`%o$O z%Vqf23I&XLOC!b|ZWSeNe}@ zoMIh1;Gr0FT`9B**+dYk(0VjXv}_y!c$^Xo*%>uc%UeF=z+?dAx`x%tl+*I{$8XQ^ zU;=7fM@tI^@~lSzprxeC-~+VFgi(X6tS6qElheF*jXP{t7jn3s2Q2M$@Ct*-TkPE! z4jH{TbFYzg!KkvD8iTnGY;M5X#7&4;zrzmk?UtU1N_YdTXhOy9oYY=xhHvGQi)hxj z=!MnYS8>+C?WnEJFSshP%=+(kJ=hK-lTZRYb2pS+-pDDSmBK$s7{!!yhYjqVC3oFC$xxzeBZn~8*H#9T^!p90=l9F0y<3~7h zii^`+ais;IEOb*w#u!oKMDc{`E%s$5t-C!(Raf&vx7dPcYQ5BD{K}{Zpf=k7BerA-`kO;8jrw_a};C~sv21R zJ5v;gUQzw1Sg!4!mrj6~VG6bQ-l3cIV1I}QP`%!Z8e|j_ls7+p`n2d6Y~f&e2ptyo z3edZ5V~=Qw$C-dgyJk}9xC8H&l+dvN>jj%ShLu`z8=SwC?RAlZqj2x`?NVUGTy}^)b)XLR zX0ZzSpaS~L)U8NKqj-PA{~^_3ZM+U9uk6$jy09T!)UBPZ;a@Hf;*DZ$?Cvd^yfMKmy}C{M*lj2c}F<^R={$PX!j~M zMuGB*F^Z9-NUS>Gi1hCB2jP5Kx>t)Sul41U@84}ficX!kSQQdc+^S6niDUuZizLu# zD_dQ)OW1`deLX}urreMsztdyNe~2(bT9ou!?wLHv$}J0Vv453*`J(K`iW| zMVV?e76Zr6OKUKJZuiCYrLYI#LmQy%l8^)ORQ4w@q;2*$z_P7;?lklQ2c>U&KNl0QR$8c; z?mAR>DGa~r4&=nfJTDKN18K$(Srn$!L%l`cy0))9R-&(F37hfOhx!+0Thi!({oeRW4n=O_6$u24VBeZ!i!NSkUlZjTT%hmMW>d~o zf94xn`bz4rNV0hE=R#h&FHQ6%;u+w?;AqQn~ZURHvh&j0|`35>6Qqqsy-x?i{ul4-5>v ziisI{c9L$m#7=E(s!{s}P2cZmhN!Qu{_@qUSHnolAdnZjcEoaNQ53zu=c7DEE4lM- z+jST{Noc!8KQ%(qIvo}6JTY3q;<+{FexNVR8eNEmShhwo>N_rY1EwS^@I=XwJ(^aB z46uF2JUuCDjWx!&IgnUFSJc-JBpl8%fEtbBo1fDW*38^F$1TR5ba8=btqLu zEQX`NK_ew=18#(0B)P(Y)Q85Z6^t7k9UV(UAH4?k*yk_Yt|Ps9Ma%;hyl1;2=DAr| z3TCw##=cg~bxr>{UgHu?B!~B2K_9S+zX7exeSVfA3yX>$<%9SkHLLl%ovHvoNqF?q= zPfDzULYh>YCC$#BJ-fGM?fIoV&gq83QRv#&jr8&hZ_KcsnW1~jf|_Xx_i#8_@l>s* zj35eeq{tUCvf|Yw5Z#vBn=Z2qIzHme(4K2PVSk+`Y!qe%9m+<84}Sr&kUYX}9fwfd zp}Y6=r~g_PV}p5uZqFj`vo z1QGzNjU-?=O<5t~s@2Xfh<*7JddWfj^bIAYmQU&8fLn{=d9Z$PUo&&BHrC0Z)AMMq zvZBsIL(kj$O|r0QU>{Z9+_P>17&d%Emw!jB|M+|FJf+?BlgFVXl^WJ`&)~5BG5-E0 z$ydz_2-)S&myl`ZAn$qg5Cym7_GWP{)@oDpTF;RQ8YUfBHP7R;+_Zi5{R;ZBs+f04i|6kUP&tux? 
zA7g-OUsqIAo(bNH!4~~|;;EWhvnz{ivKD6zQfEu{O_W0J>Wn1G5$Yict3vj i#Q*zb^8etcXkpR6D#_HbLo|=S3{}y3fV}_v^Zx \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/redis-service.yaml b/dev/limitador/limitador-server/kubernetes/redis-service.yaml deleted file mode 100644 index 2096d64d..00000000 --- a/dev/limitador/limitador-server/kubernetes/redis-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: redis -spec: - ports: - - name: redis - port: 6379 - protocol: TCP - targetPort: redis - selector: - statefulset: redis - type: ClusterIP \ No newline at end of file diff --git a/dev/limitador/limitador-server/kubernetes/redis-statefulset.yaml b/dev/limitador/limitador-server/kubernetes/redis-statefulset.yaml deleted file mode 100644 index b344e8be..00000000 --- a/dev/limitador/limitador-server/kubernetes/redis-statefulset.yaml +++ /dev/null @@ -1,55 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: redis -spec: - selector: - matchLabels: - statefulset: redis - serviceName: redis - replicas: 1 - template: - metadata: - labels: - statefulset: redis - spec: - containers: - - name: redis - image: redis:6.0 - ports: - - containerPort: 6379 - name: redis - livenessProbe: - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - redis-cli - - ping - readinessProbe: - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 2 - successThreshold: 1 - failureThreshold: 5 - exec: - command: - - redis-cli - - ping - volumeMounts: - - name: redis-storage - mountPath: /data - subPath: data - volumeClaimTemplates: - - metadata: - name: redis-storage - spec: - volumeMode: Filesystem - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi \ No newline at end of file diff --git a/dev/limitador/limitador-server/sandbox/Makefile b/dev/limitador/limitador-server/sandbox/Makefile deleted file mode 100644 index 
7d90b8e6..00000000 --- a/dev/limitador/limitador-server/sandbox/Makefile +++ /dev/null @@ -1,126 +0,0 @@ -SHELL := /bin/bash - -MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) -PROJECT_PATH := $(patsubst %/,%,$(dir $(MKFILE_PATH))) - -DOCKER ?= $(shell which docker 2> /dev/null || echo "docker") - -all: help - -##@ General - -# The help target prints out all targets with their descriptions organized -# beneath their categories. The categories are represented by '##@' and the -# target descriptions by '##'. The awk commands is responsible for reading the -# entire set of makefiles included in this invocation, looking for lines of the -# file as xyz: ## something, and then pretty-format the target and help. Then, -# if there's a line with ##@ something, that gets pretty-printed as a category. -# More info on the usage of ANSI control characters for terminal formatting: -# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters -# More info on the awk command: -# http://linuxcommand.org/lc3_adv_awk.php - -help: ## Display this help. 
- @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-30s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) - -##@ Deployment Options - -deploy-in-memory: clean ## Counters are held in Limitador (ephemeral) - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-memory.yaml up - -deploy-redis: clean ## Uses Redis to store counters - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis.yaml up - -deploy-redis-tls: clean ## Uses Redis with TLS and password protected to store counters - $(MAKE) ca - $(MAKE) redis-client-certs - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis-tls.yaml up - -deploy-redis-cached: clean ## Uses Redis to store counters, with an in-memory cache - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis-cached.yaml up - -deploy-redis-otel: clean ## Uses Redis to store counters, instrumented with opentelemetry - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis-otel.yaml up - -deploy-disk: clean ## Uses disk to store counters - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-disk.yaml up - -deploy-distributed: clean ## Counters are held in Limitador (ephemeral) but replicated to other Limitador servers. - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-distributed.yaml up - -deploy-distributed-3-node: clean ## Counters are held in Limitador (ephemeral) but replicated to 3 other local Limitador servers. 
- $(DOCKER) compose -f docker-compose-envoy-3-node.yaml -f docker-compose-limitador-distributed-3-node.yaml up - -##@ Helper targets - -build: clean ## Build the "limitador-testing" image - $(DOCKER) compose -f docker-compose-limitador-memory.yaml build - -build-all-features: clean ## Build the image "limitador-testing-all-features" image - $(DOCKER) compose -f docker-compose-limitador-distributed.yaml build - -ca: ## Create CA cert - openssl genrsa -out ca.key 2048 - openssl req -batch -new -x509 -nodes -key ca.key -sha256 -days 1024 -out ca.crt - -redis-client-certs: ## Create CSR, then sign it with CA cert - openssl req -subj '/CN=redis' -newkey rsa:4096 -nodes \ - -sha256 \ - -days 3650 \ - -keyout redis.key \ - -out redis.csr - chmod +r redis.key - openssl x509 -req -in redis.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out redis.crt -days 500 -sha256 - -##@ Cleanning targets - -redis-clean-certs: ## Clean certs - - rm *.crt *.key *.pem *.csr - -clean-containers: ## clean containers - - $(DOCKER) compose down --volumes --remove-orphans - - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-memory.yaml down --volumes --remove-orphans - - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis.yaml down --volumes --remove-orphans - - $(DOCKER)_compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis-cached.yaml down --volumes --remove-orphans - - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-disk.yaml down --volumes --remove-orphans - - $(DOCKER) compose -f docker-compose-envoy.yaml -f docker-compose-limitador-redis-otel.yaml down --volumes --remove-orphans - - $(MAKE) cleancerts - -clean: ## clean all - - $(MAKE) clean-containers - - $(MAKE) redis-clean-certs - -GRPCURL=$(PROJECT_PATH)/bin/grpcurl -$(GRPCURL): - $(call go-install-tool,$(GRPCURL),github.com/fullstorydev/grpcurl/cmd/grpcurl@v1.8.9) - -.PHONY: grpcurl -grpcurl: $(GRPCURL) ## Download grpcurl locally if 
necessary. - -.PHONY: ghz -ghz: - $(call go-install-tool,$(PROJECT_PATH)/bin/ghz,github.com/bojand/ghz/cmd/ghz@latest) - -RPS?=1000 -.PHONY: load-test -load-test: ghz - # see https://ghz.sh/docs/load for usage details - $(PROJECT_PATH)/bin/ghz 127.0.0.1:18081 --insecure \ - --call envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit \ - --async --concurrency=50 \ - --rps=$(RPS) \ - --total=$(RPS)0 \ - --data-file load-test.json - -# go-install-tool will 'go install' any package $2 and install it to $1. -define go-install-tool -@[ -f $(1) ] || { \ -set -e ;\ -TMP_DIR=$$(mktemp -d) ;\ -cd $$TMP_DIR ;\ -go mod init tmp ;\ -echo "Downloading $(2)" ;\ -GOBIN=$(PROJECT_PATH)/bin go install $(2) ;\ -rm -rf $$TMP_DIR ;\ -} -endef diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-envoy-3-node.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-envoy-3-node.yaml deleted file mode 100644 index 702fd234..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-envoy-3-node.yaml +++ /dev/null @@ -1,74 +0,0 @@ ---- -version: '3.8' -services: - envoy: - image: envoyproxy/envoy:v1.20-latest - depends_on: - - upstream - - limitador - command: - - /usr/local/bin/envoy - - --config-path - - /etc/envoy.yaml - - --log-level - - info - - --component-log-level - - http:debug,router:debug - - --service-cluster - - proxy - expose: - - "80" - - "8001" - ports: - - "18000:80" - - "18001:8001" - volumes: - - ./envoy.yaml:/etc/envoy.yaml - envoy2: - image: envoyproxy/envoy:v1.20-latest - depends_on: - - upstream - - limitador - command: - - /usr/local/bin/envoy - - --config-path - - /etc/envoy.yaml - - --log-level - - info - - --component-log-level - - http:debug,router:debug - - --service-cluster - - proxy - expose: - - "80" - - "8001" - ports: - - "18100:80" - - "18101:8001" - volumes: - - ./envoy.yaml:/etc/envoy.yaml - envoy3: - image: envoyproxy/envoy:v1.20-latest - depends_on: - - upstream - - limitador - command: - - /usr/local/bin/envoy - - 
--config-path - - /etc/envoy.yaml - - --log-level - - info - - --component-log-level - - http:debug,router:debug - - --service-cluster - - proxy - expose: - - "80" - - "8001" - ports: - - "18200:80" - - "18201:8001" - volumes: - - ./envoy.yaml:/etc/envoy.yaml - upstream: - image: kennethreitz/httpbin diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-envoy.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-envoy.yaml deleted file mode 100644 index bfbfca17..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-envoy.yaml +++ /dev/null @@ -1,28 +0,0 @@ ---- -version: '3.8' -services: - envoy: - image: envoyproxy/envoy:v1.20-latest - depends_on: - - upstream - - limitador - command: - - /usr/local/bin/envoy - - --config-path - - /etc/envoy.yaml - - --log-level - - info - - --component-log-level - - http:debug,router:debug - - --service-cluster - - proxy - expose: - - "80" - - "8001" - ports: - - "18000:80" - - "18001:8001" - volumes: - - ./envoy.yaml:/etc/envoy.yaml - upstream: - image: kennethreitz/httpbin diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-disk.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-disk.yaml deleted file mode 100644 index f84a7c73..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-disk.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: ${LIMITADOR_IMAGE:-limitador-testing} - build: - context: ../.. 
- dockerfile: Dockerfile - command: - - limitador-server - - --rls-ip - - 0.0.0.0 - - --rls-port - - "8081" - - --http-ip - - 0.0.0.0 - - --http-port - - "8080" - - -vvv - - --grpc-reflection-service - - /opt/kuadrant/limits/limits.yaml - - disk - - "/tmp/data" - expose: - - "8080" - - "8081" - ports: - - "18080:8080" - - "18081:8081" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed-3-node.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed-3-node.yaml deleted file mode 100644 index 31aca3d9..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed-3-node.yaml +++ /dev/null @@ -1,58 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: limitador-testing-all-features - build: - context: ../.. - dockerfile: Dockerfile - args: - - CARGO_ARGS=--all-features - command: | - limitador-server --rls-ip 0.0.0.0 --rls-port 8081 --http-ip 0.0.0.0 --http-port "8080" - -vv --grpc-reflection-service /opt/kuadrant/limits/limits.yaml - distributed limitador 0.0.0.0:5001 http://limitador2:5001 http://limitador3:5001 - expose: - - "8080" - - "8081" - - "5001" - ports: - - "18080:8080" - - "18081:8081" - - "15001:5001" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml - limitador2: - image: limitador-testing-all-features - build: - context: ../.. - dockerfile: Dockerfile - args: - - CARGO_ARGS=--all-features - command: | - limitador-server --rls-ip 0.0.0.0 --rls-port 8081 --http-ip 0.0.0.0 --http-port "8080" - -vv --grpc-reflection-service /opt/kuadrant/limits/limits.yaml - distributed limitador2 0.0.0.0:5001 http://limitador:5001 http://limitador3:5001 - expose: - - "8080" - - "8081" - - "5001" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml - limitador3: - image: limitador-testing-all-features - build: - context: ../.. 
- dockerfile: Dockerfile - args: - - CARGO_ARGS=--all-features - command: | - limitador-server --rls-ip 0.0.0.0 --rls-port 8081 --http-ip 0.0.0.0 --http-port "8080" - -vv --grpc-reflection-service /opt/kuadrant/limits/limits.yaml - distributed limitador3 0.0.0.0:5001 http://limitador:5001 http://limitador2:5001 - expose: - - "8080" - - "8081" - - "5001" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed.yaml deleted file mode 100644 index c0d83038..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-distributed.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: limitador-testing-all-features - build: - context: ../.. - dockerfile: Dockerfile - args: - - CARGO_ARGS=--all-features - command: | - limitador-server --rls-ip 0.0.0.0 --rls-port 8081 --http-ip 0.0.0.0 --http-port "8080" - -vv --grpc-reflection-service /opt/kuadrant/limits/limits.yaml - distributed ${PEER_ID:-node1} 0.0.0.0:5001 ${PEER_URLS:-} - expose: - - "8080" - - "8081" - - "5001" - ports: - - "18080:8080" - - "18081:8081" - - "15001:5001" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-memory.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-memory.yaml deleted file mode 100644 index 76288fc4..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-memory.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -services: - limitador: - image: ${LIMITADOR_IMAGE:-limitador-testing} - build: - context: ../.. 
- dockerfile: Dockerfile - command: - - limitador-server - - --rls-ip - - 0.0.0.0 - - --rls-port - - "8081" - - --http-ip - - 0.0.0.0 - - --http-port - - "8080" - - -vvv - - --grpc-reflection-service - - /opt/kuadrant/limits/limits.yaml - - memory - expose: - - "8080" - - "8081" - ports: - - "18080:8080" - - "18081:8081" - volumes: - # the entire path needs to be mounted - # or inotify events are not triggered for updates on the mounted file - - ./:/opt/kuadrant/limits diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-cached.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-cached.yaml deleted file mode 100644 index 7c257bbe..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-cached.yaml +++ /dev/null @@ -1,43 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: ${LIMITADOR_IMAGE:-limitador-testing} - build: - context: ../.. - dockerfile: Dockerfile - depends_on: - - redis - command: - - limitador-server - - --rls-ip - - 0.0.0.0 - - --rls-port - - "8081" - - --http-ip - - 0.0.0.0 - - --http-port - - "8080" - - -vvv - - --grpc-reflection-service - - /opt/kuadrant/limits/limits.yaml - - redis_cached - - --batch-size - - "100" - - --flush-period - - "1000" - - --max-cached - - "10000" - - --response-timeout - - "350" - - redis://redis:6379 - expose: - - "8080" - - "8081" - ports: - - "18080:8080" - - "18081:8081" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml - redis: - image: redis:5 diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-otel.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-otel.yaml deleted file mode 100644 index 8c61f7aa..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-otel.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: ${LIMITADOR_IMAGE:-limitador-testing} - build: - context: ../.. 
- dockerfile: Dockerfile - depends_on: - - jaeger - - redis - command: - - limitador-server - - --tracing-endpoint - - "rpc://jaeger:4317" - - --rls-ip - - 0.0.0.0 - - --rls-port - - "8081" - - --http-ip - - 0.0.0.0 - - --http-port - - "8080" - - -vvvv - - --grpc-reflection-service - - /opt/kuadrant/limits/limits.yaml - - redis - - redis://redis:6379 - expose: - - "8080" - - "8081" - ports: - - "18080:8080" - - "18081:8081" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml - redis: - image: redis:7 - jaeger: - image: jaegertracing/all-in-one:latest - environment: - JAEGER_DISABLED: "false" - COLLECTOR_OTLP_ENABLED: "true" - ports: - - 16686:16686 - expose: - - "4317" diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-tls.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-tls.yaml deleted file mode 100644 index 5c603a5e..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis-tls.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: ${LIMITADOR_IMAGE:-limitador-testing} - build: - context: ../.. 
- dockerfile: Dockerfile - depends_on: - - redis - command: - - limitador-server - - --rls-ip - - 0.0.0.0 - - --rls-port - - "8081" - - --http-ip - - 0.0.0.0 - - --http-port - - "8080" - - -vvv - - --grpc-reflection-service - - /opt/kuadrant/limits/limits.yaml - - redis - - rediss://:foobared@redis:6379/#insecure - expose: - - "8080" - - "8081" - ports: - - "18080:8080" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml - redis: - image: redis:6.2 - restart: always - ports: - - '6379:6379' - command: - - redis-server - - /usr/local/etc/redis/redis.conf - volumes: - - ./redis-tls/redis-config.conf:/usr/local/etc/redis/redis.conf - - ./redis.crt:/usr/local/etc/redis/certs/redis.crt - - ./redis.key:/usr/local/etc/redis/certs/redis.key - - ./ca.crt:/usr/local/etc/redis/certs/ca.crt diff --git a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis.yaml b/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis.yaml deleted file mode 100644 index f5fc3b02..00000000 --- a/dev/limitador/limitador-server/sandbox/docker-compose-limitador-redis.yaml +++ /dev/null @@ -1,35 +0,0 @@ ---- -version: '3.8' -services: - limitador: - image: ${LIMITADOR_IMAGE:-limitador-testing} - build: - context: ../.. 
- dockerfile: Dockerfile - depends_on: - - redis - command: - - limitador-server - - --rls-ip - - 0.0.0.0 - - --rls-port - - "8081" - - --http-ip - - 0.0.0.0 - - --http-port - - "8080" - - -vvv - - --grpc-reflection-service - - /opt/kuadrant/limits/limits.yaml - - redis - - redis://redis:6379 - expose: - - "8080" - - "8081" - ports: - - "18080:8080" - - "18081:8081" - volumes: - - ./limits.yaml:/opt/kuadrant/limits/limits.yaml - redis: - image: redis:5 diff --git a/dev/limitador/limitador-server/sandbox/envoy.yaml b/dev/limitador/limitador-server/sandbox/envoy.yaml deleted file mode 100644 index 7e678e10..00000000 --- a/dev/limitador/limitador-server/sandbox/envoy.yaml +++ /dev/null @@ -1,84 +0,0 @@ ---- -static_resources: - listeners: - - name: main - address: - socket_address: - address: 0.0.0.0 - port_value: 80 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: upstream - rate_limits: - - actions: - - request_headers: - header_name: :method - descriptor_key: req.method - - request_headers: - header_name: :path - descriptor_key: req.path - http_filters: - - name: envoy.filters.http.ratelimit - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit - domain: "test_namespace" - failure_mode_deny: true - timeout: 3s - rate_limit_service: - transport_api_version: "v3" - grpc_service: - envoy_grpc: - cluster_name: limitador - - name: envoy.filters.http.router - clusters: - - name: upstream - connect_timeout: 0.25s - type: STRICT_DNS - lb_policy: round_robin - load_assignment: - cluster_name: upstream - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: 
upstream - port_value: 80 - - name: limitador - connect_timeout: 1s - type: STRICT_DNS - lb_policy: round_robin - typed_extension_protocol_options: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions - explicit_http_config: - http2_protocol_options: {} - load_assignment: - cluster_name: limitador - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: limitador - port_value: 8081 -admin: - access_log_path: "/dev/null" - address: - socket_address: - address: 0.0.0.0 - port_value: 8001 diff --git a/dev/limitador/limitador-server/sandbox/envoy2.yaml b/dev/limitador/limitador-server/sandbox/envoy2.yaml deleted file mode 100644 index d1d15958..00000000 --- a/dev/limitador/limitador-server/sandbox/envoy2.yaml +++ /dev/null @@ -1,84 +0,0 @@ ---- -static_resources: - listeners: - - name: main - address: - socket_address: - address: 0.0.0.0 - port_value: 80 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: upstream - rate_limits: - - actions: - - request_headers: - header_name: :method - descriptor_key: req.method - - request_headers: - header_name: :path - descriptor_key: req.path - http_filters: - - name: envoy.filters.http.ratelimit - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit - domain: "test_namespace" - failure_mode_deny: true - timeout: 3s - rate_limit_service: - transport_api_version: "v3" - grpc_service: - envoy_grpc: - cluster_name: limitador - - name: envoy.filters.http.router - clusters: - - name: upstream - connect_timeout: 0.25s - type: STRICT_DNS - 
lb_policy: round_robin - load_assignment: - cluster_name: upstream - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: upstream - port_value: 80 - - name: limitador - connect_timeout: 1s - type: STRICT_DNS - lb_policy: round_robin - typed_extension_protocol_options: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions - explicit_http_config: - http2_protocol_options: {} - load_assignment: - cluster_name: limitador - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: limitador2 - port_value: 8081 -admin: - access_log_path: "/dev/null" - address: - socket_address: - address: 0.0.0.0 - port_value: 8001 diff --git a/dev/limitador/limitador-server/sandbox/envoy3.yaml b/dev/limitador/limitador-server/sandbox/envoy3.yaml deleted file mode 100644 index 03180c47..00000000 --- a/dev/limitador/limitador-server/sandbox/envoy3.yaml +++ /dev/null @@ -1,84 +0,0 @@ ---- -static_resources: - listeners: - - name: main - address: - socket_address: - address: 0.0.0.0 - port_value: 80 - filter_chains: - - filters: - - name: envoy.filters.network.http_connection_manager - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager - stat_prefix: ingress_http - route_config: - name: local_route - virtual_hosts: - - name: local_service - domains: - - "*" - routes: - - match: - prefix: "/" - route: - cluster: upstream - rate_limits: - - actions: - - request_headers: - header_name: :method - descriptor_key: req.method - - request_headers: - header_name: :path - descriptor_key: req.path - http_filters: - - name: envoy.filters.http.ratelimit - typed_config: - "@type": type.googleapis.com/envoy.extensions.filters.http.ratelimit.v3.RateLimit - domain: "test_namespace" - failure_mode_deny: true - timeout: 3s - rate_limit_service: - transport_api_version: "v3" - grpc_service: - 
envoy_grpc: - cluster_name: limitador - - name: envoy.filters.http.router - clusters: - - name: upstream - connect_timeout: 0.25s - type: STRICT_DNS - lb_policy: round_robin - load_assignment: - cluster_name: upstream - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: upstream - port_value: 80 - - name: limitador - connect_timeout: 1s - type: STRICT_DNS - lb_policy: round_robin - typed_extension_protocol_options: - envoy.extensions.upstreams.http.v3.HttpProtocolOptions: - "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions - explicit_http_config: - http2_protocol_options: {} - load_assignment: - cluster_name: limitador - endpoints: - - lb_endpoints: - - endpoint: - address: - socket_address: - address: limitador3 - port_value: 8081 -admin: - access_log_path: "/dev/null" - address: - socket_address: - address: 0.0.0.0 - port_value: 8001 diff --git a/dev/limitador/limitador-server/sandbox/index.html b/dev/limitador/limitador-server/sandbox/index.html deleted file mode 100644 index fcfe8c23..00000000 --- a/dev/limitador/limitador-server/sandbox/index.html +++ /dev/null @@ -1,1611 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Index - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - - - - - -
-
- - - - - - - - - - - - -

Index

- -

Testing Environment

-

Requirements

-
    -
  • docker v24+
  • -
-

Setup

-

Clone the project

-
git clone https://github.com/Kuadrant/limitador.git
-cd limitador/limitador-server/sandbox
-
-

Check out make help for all the targets.

-

Deployment options

- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Limitador's configurationCommandInfo
In-memory configurationmake deploy-in-memoryCounters are held in Limitador (ephemeral)
Redismake deploy-redisUses Redis to store counters
Redis Securedmake deploy-redis-tlsUses Redis with TLS and password protected to store counters
Redis Cachedmake deploy-redis-cachedUses Redis to store counters, with an in-memory cache
Redis Otel Instrumentedmake deploy-redis-otelUses redis to store counters, instrumented with opentelemetry
Diskmake deploy-diskUses disk to store counters
Distributedmake deploy-distributedCounters are held in Limitador (ephemeral) but replicated to other Limitador servers.
-

| Distributed 3 Node | make deploy-distributed-3-node | Counters are held in Limitador (ephemeral) but replicated to 3 other Limitador servers. |

-

Running Multi Node Distributed Deployments

-

The make deploy-distributed target can be connected to other Limitador servers but requires you to set the PEER_ID and PEER_URLS environment variables when you run the target.

-

If you have 3 servers you want to replicate between, you would run the following commands:

-
# on server where: hostname=server1
-PEER_ID=`hostname` PEER_URLS="http://server2:15001 http://server3:15001" make deploy-distributed
-
-
# on server where: hostname=server2
-PEER_ID=`hostname` PEER_URLS="http://server1:15001 http://server3:15001" make deploy-distributed
-
-
# on server where: hostname=server3
-PEER_ID=`hostname` PEER_URLS="http://server1:15001 http://server2:15001" make deploy-distributed
-
-

The PEER_ID just need to be unique between the servers, and the PEER_URLS should be a space-separated list of the other servers' URLs.

-

Limitador's admin HTTP endpoint

-

Limits

-
curl -i http://127.0.0.1:18080/limits/test_namespace
-
-

Counters

-
curl -i http://127.0.0.1:18080/counters/test_namespace
-
-

Metrics

-
curl -i http://127.0.0.1:18080/metrics
-
-

Limitador's GRPC RateLimitService endpoint

-

Get grpcurl. You need Go SDK installed.

-

Golang version >= 1.18 (from fullstorydev/grpcurl)

-
make grpcurl
-
-

Inspect RateLimitService GRPC service

-
bin/grpcurl -plaintext 127.0.0.1:18081 describe envoy.service.ratelimit.v3.RateLimitService
-
-

Make a custom request

-
bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM
-{
-    "domain": "test_namespace",
-    "hits_addend": 1,
-    "descriptors": [
-        {
-            "entries": [
-                {
-                    "key": "req.method",
-                    "value": "POST"
-                },
-                {
-                    "key": "req.path",
-                    "value": "/"
-                }
-            ]
-        }
-    ]
-}
-EOM
-
-

Do repeated requests. As the limit is set to max 5 request for 60 seconds, -you should see OVER_LIMIT response after 5 requests.

-
while :; do bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM; sleep 1; done
-{
-    "domain": "test_namespace",
-    "hits_addend": 1,
-    "descriptors": [
-        {
-            "entries": [
-                {
-                    "key": "req.method",
-                    "value": "POST"
-                },
-                {
-                    "key": "req.path",
-                    "value": "/"
-                }
-            ]
-        }
-    ]
-}
-EOM
-
-

Downstream traffic

-

Upstream service implemented by httpbin.org

-
curl -i -H "Host: example.com" http://127.0.0.1:18000/get
-
-

Load Testing the GRPC RateLimitService directly

-

This load test will use grpcurl. You need Go SDK installed.

-

Run a load test a 5000 requests per second (RPS) for 10 seconds:

-
RPS=5000 make load-test
-
-

Load Testing via Envoy Proxy

-
cargo run --manifest-path loadtest/Cargo.toml  --package loadtest --release -- --report-file=report.htm
-
-

The report will be saved in report.htm file.

-

Limitador Image

-

By default, the sandbox will run Limitador's limitador-testing:latest image.

-

Building limitador-testing:latest image

-

You can easily build the limitador's image from the current workspace code base with:

-
make build
-
-

The image will be tagged with limitador-testing:latest

-

Using custom Limitador's image

-

The LIMITADOR_IMAGE environment variable overrides the default image. For example:

-
make deploy-in-memory LIMITADOR_IMAGE=quay.io/kuadrant/limitador:latest
-
-

Clean env

-
make clean
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/limitador-server/sandbox/limits.yaml b/dev/limitador/limitador-server/sandbox/limits.yaml deleted file mode 100644 index cb354bf2..00000000 --- a/dev/limitador/limitador-server/sandbox/limits.yaml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- namespace: test_namespace - max_value: 10 - seconds: 60 - conditions: - - "descriptors[0]['req.method'] == 'GET'" - - "descriptors[0]['req.path'] != '/json'" - variables: [] -- namespace: test_namespace - max_value: 5 - seconds: 60 - conditions: - - "descriptors[0]['req.method'] == 'POST'" - - "descriptors[0]['req.path'] != '/json'" - variables: [] -- namespace: test_namespace - max_value: 50000 - seconds: 10 - conditions: - - "descriptors[0]['req.method'] == 'GET'" - - "descriptors[0]['req.path'] == '/json'" - variables: [] diff --git a/dev/limitador/limitador-server/sandbox/load-test.json b/dev/limitador/limitador-server/sandbox/load-test.json deleted file mode 100644 index b23422cc..00000000 --- a/dev/limitador/limitador-server/sandbox/load-test.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "domain": "test_namespace", - "hits_addend": 1, - "descriptors": [ - { - "entries": [ - { - "key": "req.method", - "value": "GET" - }, - { - "key": "req.path", - "value": "/json" - } - ] - } - ] -} \ No newline at end of file diff --git a/dev/limitador/limitador-server/sandbox/loadtest/Cargo.lock b/dev/limitador/limitador-server/sandbox/loadtest/Cargo.lock deleted file mode 100644 index 04451995..00000000 --- a/dev/limitador/limitador-server/sandbox/loadtest/Cargo.lock +++ /dev/null @@ -1,1874 +0,0 @@ -# This file is automatically @generated by Cargo. -# It is not intended for manual editing. 
-version = 3 - -[[package]] -name = "addr2line" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" -dependencies = [ - "gimli", -] - -[[package]] -name = "adler" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" - -[[package]] -name = "aho-corasick" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" -dependencies = [ - "memchr", -] - -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - -[[package]] -name = "android_system_properties" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" -dependencies = [ - "libc", -] - -[[package]] -name = "arrayvec" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" - -[[package]] -name = "async-compression" -version = "0.4.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" -dependencies = [ - "flate2", - "futures-core", - "memchr", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "async-trait" -version = "0.1.80" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - -[[package]] -name = "autocfg" -version = "1.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" - -[[package]] -name = "backtrace" -version = "0.3.72" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" -dependencies = [ - "addr2line", - "cc", - "cfg-if", - "libc", - "miniz_oxide", - "object", - "rustc-demangle", -] - -[[package]] -name = "base64" -version = "0.21.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - -[[package]] -name = "bitflags" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" - -[[package]] -name = "block-buffer" -version = "0.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" -dependencies = [ - "generic-array", -] - -[[package]] -name = "bumpalo" -version = "3.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" - -[[package]] -name = "byteorder" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" - -[[package]] -name = "bytes" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" - -[[package]] -name = "cc" -version = "1.0.99" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "96c51067fd44124faa7f870b4b1c969379ad32b2ba805aa959430ceaa384f695" - -[[package]] -name = "cfg-if" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" - -[[package]] -name = "cfg_aliases" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" - -[[package]] -name = "chrono" -version = "0.4.38" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" -dependencies = [ - "android-tzdata", - "iana-time-zone", - "num-traits", - "windows-targets 0.52.5", -] - -[[package]] -name = "cookie" -version = "0.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7efb37c3e1ccb1ff97164ad95ac1606e8ccd35b3fa0a7d99a304c7f4a428cc24" -dependencies = [ - "percent-encoding", - "time", - "version_check", -] - -[[package]] -name = "cookie_store" -version = "0.20.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387461abbc748185c3a6e1673d826918b450b87ff22639429c694619a83b6cf6" -dependencies = [ - "cookie", - "idna 0.3.0", - "log", - "publicsuffix", - "serde", - "serde_derive", - "serde_json", - "time", - "url", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" - -[[package]] -name = "cpufeatures" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" -dependencies = [ - "libc", -] - -[[package]] -name = "crc32fast" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "crypto-common" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" -dependencies = [ - "generic-array", - "typenum", -] - -[[package]] -name = "ctrlc" -version = "3.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672465ae37dc1bc6380a6547a8883d5dd397b0f1faaad4f265726cc7042a5345" -dependencies = [ - "nix", - "windows-sys 0.52.0", -] - -[[package]] -name = "data-encoding" -version = "2.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" - -[[package]] -name = "deranged" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" -dependencies = [ - "powerfmt", -] - -[[package]] -name = "digest" -version = "0.10.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" -dependencies = [ - "block-buffer", - "crypto-common", -] - -[[package]] -name = "downcast-rs" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" - -[[package]] -name = "either" -version = "1.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" - -[[package]] -name = "encoding_rs" -version = "0.8.34" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "equivalent" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" - -[[package]] -name = "errno" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "fastrand" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" - -[[package]] -name = "flate2" -version = "1.0.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" -dependencies = [ - "crc32fast", - "miniz_oxide", -] - -[[package]] -name = "flume" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" -dependencies = [ - "futures-core", - "futures-sink", - "nanorand", - "spin", -] - -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - 
-[[package]] -name = "form_urlencoded" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" -dependencies = [ - "percent-encoding", -] - -[[package]] -name = "futures" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" -dependencies = [ - "futures-channel", - "futures-core", - "futures-executor", - "futures-io", - "futures-sink", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-channel" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" -dependencies = [ - "futures-core", - "futures-sink", -] - -[[package]] -name = "futures-core" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" - -[[package]] -name = "futures-executor" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" -dependencies = [ - "futures-core", - "futures-task", - "futures-util", -] - -[[package]] -name = "futures-io" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" - -[[package]] -name = "futures-macro" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - -[[package]] -name = "futures-sink" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" - -[[package]] -name = "futures-task" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" - -[[package]] -name = "futures-util" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" -dependencies = [ - "futures-channel", - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", -] - -[[package]] -name = "generic-array" -version = "0.14.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" -dependencies = [ - "typenum", - "version_check", -] - -[[package]] -name = "getrandom" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" -dependencies = [ - "cfg-if", - "js-sys", - "libc", - "wasi", - "wasm-bindgen", -] - -[[package]] -name = "gimli" -version = "0.29.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" - -[[package]] -name = "goose" -version = "0.17.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cfeedc01d217935e1371901ea642ea6693d68893c67336d146eba23be04bf11" -dependencies = [ - "async-trait", - "chrono", - "ctrlc", - "downcast-rs", - "flume", - "futures", - "gumdrop", - "http", - "itertools", - "lazy_static", - "log", - "num-format", - "rand", - "regex", - "reqwest", - "serde", - "serde_json", - "simplelog", - "strum", - "strum_macros", - "tokio", - "tokio-tungstenite", - "tungstenite", - "url", -] - -[[package]] -name = "gumdrop" -version = "0.8.1" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc700f989d2f6f0248546222d9b4258f5b02a171a431f8285a81c08142629e3" -dependencies = [ - "gumdrop_derive", -] - -[[package]] -name = "gumdrop_derive" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "729f9bd3449d77e7831a18abfb7ba2f99ee813dfd15b8c2167c9a54ba20aa99d" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "h2" -version = "0.3.26" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http", - "indexmap", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "hashbrown" -version = "0.14.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" - -[[package]] -name = "heck" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" - -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http", - "pin-project-lite", -] - -[[package]] -name = "httparse" -version = "1.8.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" - -[[package]] -name = "httpdate" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" - -[[package]] -name = "hyper" -version = "0.14.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2", - "tokio", - "tower-service", - "tracing", - "want", -] - -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper", - "native-tls", - "tokio", - "tokio-native-tls", -] - -[[package]] -name = "iana-time-zone" -version = "0.1.60" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" -dependencies = [ - "android_system_properties", - "core-foundation-sys", - "iana-time-zone-haiku", - "js-sys", - "wasm-bindgen", - "windows-core", -] - -[[package]] -name = "iana-time-zone-haiku" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" -dependencies = [ - "cc", -] - -[[package]] -name = "idna" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "idna" -version = "0.5.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - -[[package]] -name = "indexmap" -version = "2.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" -dependencies = [ - "equivalent", - "hashbrown", -] - -[[package]] -name = "ipnet" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" - -[[package]] -name = "itertools" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" -dependencies = [ - "either", -] - -[[package]] -name = "itoa" -version = "1.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" - -[[package]] -name = "js-sys" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" -dependencies = [ - "wasm-bindgen", -] - -[[package]] -name = "lazy_static" -version = "1.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" - -[[package]] -name = "libc" -version = "0.2.155" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" - -[[package]] -name = "linux-raw-sys" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" - -[[package]] -name = "loadtest" -version = "0.1.0" -dependencies = [ - "goose", - 
"tokio", -] - -[[package]] -name = "lock_api" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg", - "scopeguard", -] - -[[package]] -name = "log" -version = "0.4.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" - -[[package]] -name = "memchr" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" - -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - -[[package]] -name = "miniz_oxide" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" -dependencies = [ - "adler", -] - -[[package]] -name = "mio" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" -dependencies = [ - "libc", - "wasi", - "windows-sys 0.48.0", -] - -[[package]] -name = "nanorand" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" -dependencies = [ - "getrandom", -] - -[[package]] -name = "native-tls" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] - -[[package]] -name = "nix" -version = "0.28.0" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" -dependencies = [ - "bitflags 2.5.0", - "cfg-if", - "cfg_aliases", - "libc", -] - -[[package]] -name = "num-conv" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" - -[[package]] -name = "num-format" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a652d9771a63711fd3c3deb670acfbe5c30a4072e664d7a3bf5a9e1056ac72c3" -dependencies = [ - "arrayvec", - "itoa", -] - -[[package]] -name = "num-traits" -version = "0.2.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" -dependencies = [ - "autocfg", -] - -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" -dependencies = [ - "libc", -] - -[[package]] -name = "object" -version = "0.35.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" -dependencies = [ - "memchr", -] - -[[package]] -name = "once_cell" -version = "1.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" - -[[package]] -name = "openssl" -version = "0.10.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" -dependencies = [ - "bitflags 2.5.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - -[[package]] -name = "openssl-sys" -version = "0.9.103" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "percent-encoding" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" - -[[package]] -name = "pin-project-lite" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" - -[[package]] -name = "pin-utils" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" - -[[package]] -name = "pkg-config" -version = "0.3.30" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" - -[[package]] -name = "powerfmt" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" - -[[package]] -name = "ppv-lite86" -version = 
"0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" - -[[package]] -name = "proc-macro2" -version = "1.0.85" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" -dependencies = [ - "unicode-ident", -] - -[[package]] -name = "psl-types" -version = "2.0.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33cb294fe86a74cbcf50d4445b37da762029549ebeea341421c7c70370f86cac" - -[[package]] -name = "publicsuffix" -version = "2.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96a8c1bda5ae1af7f99a2962e49df150414a43d62404644d98dd5c3a93d07457" -dependencies = [ - "idna 0.3.0", - "psl-types", -] - -[[package]] -name = "quote" -version = "1.0.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" -dependencies = [ - "proc-macro2", -] - -[[package]] -name = "rand" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" -dependencies = [ - "libc", - "rand_chacha", - "rand_core", -] - -[[package]] -name = "rand_chacha" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" -dependencies = [ - "ppv-lite86", - "rand_core", -] - -[[package]] -name = "rand_core" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" -dependencies = [ - "getrandom", -] - -[[package]] -name = "regex" -version = "1.10.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" -dependencies = [ - "aho-corasick", - "memchr", - "regex-automata", - "regex-syntax", -] - -[[package]] -name = "regex-automata" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax", -] - -[[package]] -name = "regex-syntax" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" - -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "async-compression", - "base64", - "bytes", - "cookie", - "cookie_store", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http", - "http-body", - "hyper", - "hyper-tls", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper", - "system-configuration", - "tokio", - "tokio-native-tls", - "tokio-util", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - -[[package]] -name = "rustc-demangle" -version = "0.1.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" - -[[package]] -name = "rustix" -version = "0.38.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" -dependencies = [ - "bitflags 2.5.0", - "errno", - "libc", - "linux-raw-sys", - "windows-sys 0.52.0", -] - -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64", -] - -[[package]] -name = "rustversion" -version = "1.0.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" - -[[package]] -name = "ryu" -version = "1.0.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" - -[[package]] -name = "schannel" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - -[[package]] -name = "security-framework" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" -dependencies = [ - "bitflags 2.5.0", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "serde" -version = "1.0.203" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" -dependencies = [ - "serde_derive", -] - -[[package]] -name = "serde_derive" -version = "1.0.203" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - -[[package]] -name = "serde_json" -version = "1.0.117" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" -dependencies = [ - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "serde_urlencoded" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" -dependencies = [ - "form_urlencoded", - "itoa", - "ryu", - "serde", -] - -[[package]] -name = "sha1" -version = "0.10.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" -dependencies = [ - "cfg-if", - "cpufeatures", - "digest", -] - -[[package]] -name = "simplelog" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16257adbfaef1ee58b1363bdc0664c9b8e1e30aed86049635fb5f147d065a9c0" -dependencies = [ - "log", - "termcolor", - "time", -] - -[[package]] -name = "slab" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] - -[[package]] -name = "socket2" -version = "0.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" -dependencies = [ - "libc", - "windows-sys 0.52.0", -] - -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" -dependencies = [ - "lock_api", -] - -[[package]] -name = "strum" -version = "0.25.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" - -[[package]] -name = "strum_macros" -version = "0.25.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.66", -] - -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "syn" -version = "2.0.66" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - -[[package]] -name = "sync_wrapper" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tempfile" -version = "3.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" -dependencies = [ - "cfg-if", - "fastrand", - "rustix", - "windows-sys 
0.52.0", -] - -[[package]] -name = "termcolor" -version = "1.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "thiserror" -version = "1.0.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" -dependencies = [ - "thiserror-impl", -] - -[[package]] -name = "thiserror-impl" -version = "1.0.61" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - -[[package]] -name = "time" -version = "0.3.36" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" -dependencies = [ - "deranged", - "itoa", - "libc", - "num-conv", - "num_threads", - "powerfmt", - "serde", - "time-core", - "time-macros", -] - -[[package]] -name = "time-core" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" - -[[package]] -name = "time-macros" -version = "0.2.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" -dependencies = [ - "num-conv", - "time-core", -] - -[[package]] -name = "tinyvec" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" -dependencies = [ - "tinyvec_macros", -] - -[[package]] -name = "tinyvec_macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" - 
-[[package]] -name = "tokio" -version = "1.38.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" -dependencies = [ - "backtrace", - "bytes", - "libc", - "mio", - "num_cpus", - "pin-project-lite", - "socket2", - "tokio-macros", - "windows-sys 0.48.0", -] - -[[package]] -name = "tokio-macros" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", -] - -[[package]] -name = "tokio-native-tls" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" -dependencies = [ - "native-tls", - "tokio", -] - -[[package]] -name = "tokio-tungstenite" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d5dcb2a1ce06d81107c3d0ffa3121fe974b73f068c8282cb1c32328113b6c" -dependencies = [ - "futures-util", - "log", - "tokio", - "tungstenite", -] - -[[package]] -name = "tokio-util" -version = "0.7.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "pin-project-lite", - "tokio", -] - -[[package]] -name = "tower-service" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" - -[[package]] -name = "tracing" -version = "0.1.40" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" -dependencies = [ - "pin-project-lite", - "tracing-core", -] - -[[package]] -name = "tracing-core" -version = "0.1.32" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" -dependencies = [ - "once_cell", -] - -[[package]] -name = "try-lock" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" - -[[package]] -name = "tungstenite" -version = "0.20.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3dac10fd62eaf6617d3a904ae222845979aec67c615d1c842b4002c7666fb9" -dependencies = [ - "byteorder", - "bytes", - "data-encoding", - "http", - "httparse", - "log", - "rand", - "sha1", - "thiserror", - "url", - "utf-8", -] - -[[package]] -name = "typenum" -version = "1.17.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" - -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - -[[package]] -name = "unicode-ident" -version = "1.0.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "unicode-normalization" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" -dependencies = [ - "tinyvec", -] - -[[package]] -name = "url" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" -dependencies = [ - "form_urlencoded", - "idna 0.5.0", - "percent-encoding", -] - -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - -[[package]] -name = "version_check" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" - -[[package]] -name = "want" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" -dependencies = [ - "try-lock", -] - -[[package]] -name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" - -[[package]] -name = "wasm-bindgen" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" -dependencies = [ - "cfg-if", - "wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" -dependencies = [ - "bumpalo", - "log", - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.66", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-futures" -version = "0.4.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" -dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", -] - -[[package]] -name = "wasm-bindgen-macro" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" -dependencies = [ - "quote", - "wasm-bindgen-macro-support", -] - -[[package]] -name = "wasm-bindgen-macro-support" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.66", - "wasm-bindgen-backend", - "wasm-bindgen-shared", -] - -[[package]] -name = "wasm-bindgen-shared" -version = "0.2.92" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" - -[[package]] -name = "web-sys" -version = "0.3.69" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" -dependencies = [ - "js-sys", - "wasm-bindgen", -] - -[[package]] -name = "winapi-util" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" -dependencies = [ - "windows-sys 0.52.0", -] - -[[package]] -name = "windows-core" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" -dependencies = [ - "windows-targets 0.52.5", -] - -[[package]] -name = "windows-sys" -version = "0.48.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" -dependencies = [ - "windows-targets 0.48.5", -] - -[[package]] -name = "windows-sys" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" -dependencies = [ - "windows-targets 0.52.5", -] - -[[package]] -name = "windows-targets" -version = "0.48.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" -dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", -] - -[[package]] -name = "windows-targets" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" -dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", - "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" - -[[package]] -name = "windows_i686_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.5" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" - -[[package]] -name = "windows_i686_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.48.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] diff --git a/dev/limitador/limitador-server/sandbox/loadtest/Cargo.toml b/dev/limitador/limitador-server/sandbox/loadtest/Cargo.toml deleted file mode 100644 index 86103223..00000000 --- a/dev/limitador/limitador-server/sandbox/loadtest/Cargo.toml +++ /dev/null @@ -1,10 +0,0 @@ -[package] -name = "loadtest" -version = "0.1.0" -edition = "2021" - -[dependencies] -goose = "^0.17" -tokio = "^1.12" - -[workspace] \ No newline at end of file diff --git a/dev/limitador/limitador-server/sandbox/loadtest/src/main.rs b/dev/limitador/limitador-server/sandbox/loadtest/src/main.rs deleted file mode 100644 index 41c2bd55..00000000 --- a/dev/limitador/limitador-server/sandbox/loadtest/src/main.rs +++ /dev/null @@ -1,26 +0,0 @@ -use goose::prelude::*; - -async fn loadtest_get_json(user: &mut GooseUser) -> TransactionResult { - let _goose_metrics = user.get("/json").await?; - - Ok(()) -} - -#[tokio::main] -async fn main() -> Result<(), GooseError> { - GooseAttack::initialize()? - .register_scenario( - scenario!("LoadtestTransactions").register_transaction(transaction!(loadtest_get_json)), - ) - .set_default(GooseDefault::Host, "http://localhost:18000")? - .set_default(GooseDefault::HatchRate, "2")? - .set_default( - GooseDefault::CoordinatedOmissionMitigation, - GooseCoordinatedOmissionMitigation::Average, - )? - .set_default(GooseDefault::RunTime, 20)? 
- .execute() - .await?; - - Ok(()) -} diff --git a/dev/limitador/limitador-server/sandbox/redis-otel/index.html b/dev/limitador/limitador-server/sandbox/redis-otel/index.html deleted file mode 100644 index 2b46c411..00000000 --- a/dev/limitador/limitador-server/sandbox/redis-otel/index.html +++ /dev/null @@ -1,1407 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Limitador instrumentation sandbox - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Limitador instrumentation sandbox

-

Limitador is configured to push traces to an opentelemetry collector.

-

Run sandbox

-
make build
-make deploy-redis-otel
-
-

Run some traffic

-
make grpcurl
-
-
bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM
-{
-    "domain": "test_namespace",
-    "hits_addend": 1,
-    "descriptors": [
-        {
-            "entries": [
-                {
-                    "key": "req.method",
-                    "value": "POST"
-                }
-            ]
-        }
-    ]
-}
-EOM
-
-

See the trace in UI

-
firefox -private-window "localhost:16686"
-
-

Screenshot 2024-03-21 at 17-08-35 Jaeger UI

-
-

Recommended to start looking at check_and_update operation.

-
-

Tear down sandbox

-
make clean
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/limitador-server/sandbox/redis-tls/index.html b/dev/limitador/limitador-server/sandbox/redis-tls/index.html deleted file mode 100644 index a609dfd0..00000000 --- a/dev/limitador/limitador-server/sandbox/redis-tls/index.html +++ /dev/null @@ -1,1351 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Index - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Index

- -

Testing redis security

-

Execute bash shell in redis pod

-
docker compose -p sandbox exec redis /bin/bash
-
-

Connect to this Redis server with redis-cli:

-
root@e024a29b74ba:/data# redis-cli --tls --cacert /usr/local/etc/redis/certs/ca.crt -a foobared
-
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/limitador/limitador-server/sandbox/redis-tls/redis-config.conf b/dev/limitador/limitador-server/sandbox/redis-tls/redis-config.conf deleted file mode 100644 index b478e76e..00000000 --- a/dev/limitador/limitador-server/sandbox/redis-tls/redis-config.conf +++ /dev/null @@ -1,7 +0,0 @@ -requirepass foobared -port 0 -tls-port 6379 -tls-cert-file /usr/local/etc/redis/certs/redis.crt -tls-key-file /usr/local/etc/redis/certs/redis.key -tls-ca-cert-file /usr/local/etc/redis/certs/ca.crt -tls-auth-clients no diff --git a/dev/limitador/limitador/index.html b/dev/limitador/limitador/index.html deleted file mode 100644 index 431b1361..00000000 --- a/dev/limitador/limitador/index.html +++ /dev/null @@ -1,1355 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - Limitador (library) - Kuadrant Documentation - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -
- - - - - - - - -
- - -
- -
- - - - - - -
-
- - - -
-
-
- - - - - -
-
-
- - - -
-
-
- - - -
-
-
- - - -
-
- - - - - - - - - - - - -

Limitador (library)

-

Crates.io -docs.rs

-

An embeddable rate-limiter library supporting in-memory, Redis and disk data stores.

-

For the complete documentation of the crate's API, please refer to docs.rs

-

Features

-
    -
  • redis_storage: support for using Redis as the data storage backend.
  • -
  • disk_storage: support for using RocksDB as a local disk storage backend.
  • -
  • default: redis_storage.
  • -
- - - - - - - - - - - - - -
-
- - - -
- -
- -
- - - -
- -
-
-
-
- - - - - - - - - - \ No newline at end of file diff --git a/dev/search/search_index.json b/dev/search/search_index.json index 70e8b08f..9ef0152c 100644 --- a/dev/search/search_index.json +++ b/dev/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"

Kuadrant combines Gateway API with gateway providers like Istio and Envoy Gateway to enhance application connectivity. It enables platform engineers and application developers to easily connect, secure, and protect their services and infrastructure across multiple clusters with policies for TLS, DNS, application authentication & authorization, and rate limiting. Additionally, Kuadrant offers observability templates to further support infrastructure management.

"},{"location":"#getting-started","title":"Getting Started","text":"

For a quick setup of Kuadrant, see our Getting Started guide. Alternatively, explore the architecture in our Architectural Overview.

"},{"location":"getting-started/","title":"Getting Started","text":"

This guide let's you quickly evaluate Kuadrant. You will need a Kubernetes cluster to try out Kuadrant. If you prefer, you can use the following steps to set up a local kind cluster.

"},{"location":"getting-started/#kind-cluster-setup","title":"Kind Cluster Setup","text":"
kind create cluster\n

To use Kuadrant, the LoadBalancer service type is required for Gateways. kind does not have any built-in way to provide IP addresses to these service types. You can follow this guide to set up a LoadBalancer provider for kind.

"},{"location":"getting-started/#installation-options","title":"Installation Options","text":"
  • Install with Helm
  • Install with OLM
"},{"location":"getting-started/#further-reading","title":"Further Reading","text":"

The documentation on this site follows the Di\u00e1taxis framework to better serve you, our users. This approach also helps us create new content and maintain existing material effectively. Under this framework, all content falls into one of four categories, accessible from the side navigation:

  • Concepts - (also called 'Explanations') Deepens and broadens your understanding of Kuadrant.
  • APIs & Reference - Provides concise descriptions of Kuadrant APIs for quick consultation.
  • Tutorials - Offers practical, step-by-step activities for you to safely try out.
  • Guides - Delivers goal-oriented instructions to help you solve specific problems in any environment.
"},{"location":"install-helm/","title":"Install with Helm","text":""},{"location":"install-helm/#prerequisites","title":"Prerequisites","text":"
  • Kubernetes cluster with support for services of type LoadBalancer
  • kubectl CLI
"},{"location":"install-helm/#basic-installation","title":"Basic Installation","text":"

The latest helm installation instructions for the kuadrant operator are maintained at https://artifacthub.io/packages/helm/kuadrant/kuadrant-operator.

After installing the operator, you can create a Kuadrant resource to install the operand components.

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n  name: kuadrant\n  namespace: kuadrant-system\nEOF\n

If everything went well, the status of the resource should be Ready

kubectl get kuadrant kuadrant -n kuadrant-system -o=jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'\n
"},{"location":"install-helm/#next-steps","title":"Next Steps","text":"
  • Try out our Secure, protect, and connect guide
"},{"location":"install-olm/","title":"Install and Configure Kuadrant and Sail via OLM using the kubectl CLI","text":"

This document will walk you through setting up the required configuration to install kaudrant using kustomize or a tool that leverages kustomize such as kubectl along with OLM. It will also go through more advanced configuration options to enable building up a resilient configuration. You can view the full configuration built here: Full AWS Example.

  1. Basic Install

  2. Configure DNS and TLS integration

  3. External Redis for Rate Limit Counters

  4. Limitador Resilient Configuration

  5. Authorino Resilient Configuration

  6. [OpenShift Specific] Setup Observability

"},{"location":"install-olm/#prerequisites","title":"Prerequisites","text":"
  • Kubernetes (or OpenShift) cluster with support for services of type LoadBalancer
  • kubectl CLI
  • OLM installed - (operator lifecycle manager releases)
  • Gateway provider installed
    • If you don't have a Gateway provider installed, steps are included in this guide to install the Sail Operator that will configure and install an Istio installation. Kuadrant is intended to work with Istio or Envoy Gateway.
  • (Optional) cert-manager for automated TLS capabilities:
    • cert-manager Operator for Red Hat OpenShift
    • installing cert-manager via OperatorHub
  • (Optional) Access to AWS, Azure or GCP with DNS service.
  • (Optional) Access to a Redis instance, for persistent storage for your rate limit counters.

Note: for multiple clusters, it would make sense to do the installation via a tool like argocd. For other methods of addressing multiple clusters take a look at the kubectl docs

"},{"location":"install-olm/#basic-installation","title":"Basic Installation","text":"

This first step will install just Kuadrant at a given released version (post v1.x) in the kuadrant-system namespace and the Sail Operator. There will be no credentials/dns providers configured (This is the most basic setup but means TLSPolicy and DNSPolicy will not be able to be used).

Start by creating the following kustomization.yaml in a directory locally. For the purpose of this doc, we will use: ~/kuadrant/ directory.

export KUADRANT_DIR=~/kuadrant\nmkdir -p $KUADRANT_DIR/install\ntouch $KUADRANT_DIR/install/kustomization.yaml\n

Add the below kustomisation CR to the kustomization.yaml created above:

apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n  # choose the cluster preference that matches your scenario below. Set the version by adding ?ref=v1.0.1. Change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/standard?ref=v1.0.1 \n  # - https://github.com/Kuadrant/kuadrant-operator//config/install/openshift?ref=v1.0.1\n\npatches: # remove this subscription patch if you are installing a development version. It will then use the \"preview\" channel\n\n  - patch: |-\n      apiVersion: operators.coreos.com/v1alpha1\n      kind: Subscription\n      metadata:\n        name: kuadrant\n      spec:\n        source: kuadrant-operator-catalog\n        sourceNamespace: kuadrant-system\n        name: kuadrant-operator\n        channel: 'stable' #set to preview if not using a release (for example if using main)\n

And execute the following to apply it to a cluster:

# change the location depending on where you created the kustomization.yaml\nkubectl apply -k $KUADRANT_DIR/install\n
"},{"location":"install-olm/#verify-the-operators-are-installed","title":"Verify the operators are installed:","text":"

OLM should begin installing the dependencies for Kuadrant. To wait for them to be ready, run:

kubectl -n kuadrant-system wait --timeout=160s --for=condition=Available deployments --all\n

Note: you may see no matching resources found if the deployments are not yet present.

Once OLM has finished installing the operators (this can take several minutes). You should see the following in the kuadrant-system namespace:

kubectl get deployments -n kuadrant-system\n\n## Output\n# NAME                                    READY   UP-TO-DATE   AVAILABLE   AGE\n# authorino-operator                      1/1     1            1           83m\n# dns-operator-controller-manager         1/1     1            1           83m\n# kuadrant-console-plugin                 1/1     1            1           83m\n# kuadrant-operator-controller-manager    1/1     1            1           83m\n# limitador-operator-controller-manager   1/1     1            1           83m\n

You can also view the subscription for information about the install:

kubectl get subscription -n kuadrant-system -o=yaml\n
"},{"location":"install-olm/#install-the-operand-components","title":"Install the operand components","text":"

Kuadrant has 2 additional operand components that it manages: Authorino that provides data plane auth integration and Limitador that provides data plane rate limiting. To set these up lets add a new kustomization.yaml in a new sub directory. We will re-use this later for further configuration. We do this as a separate step as we want to have the operators installed first.

Add the following to your local directory. For the purpose of this doc, we will use: $KUADRANT_DIR/configure/kustomization.yaml.

mkdir -p $KUADRANT_DIR/configure\ntouch $KUADRANT_DIR/configure/kustomization.yaml\n

Add the following to the new kustomization.yaml:

apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n

Lets apply this to your cluster:

kubectl apply -k $KUADRANT_DIR/configure\n
"},{"location":"install-olm/#verify-kuadrant-is-installed-and-ready","title":"Verify Kuadrant is installed and ready:","text":"
kubectl get kuadrant kuadrant -n kuadrant-system -o=jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'\n

You should see the message kuadrant is ready.

"},{"location":"install-olm/#verify-istio-is-configured-and-ready","title":"Verify Istio is configured and ready:","text":"
kubectl wait istio/default --for=condition=ready=true\n

At this point Kuadrant is installed and ready to be used as is Istio as the gateway provider. This means AuthPolicy and RateLimitPolicy can now be configured and used to protect any Gateways you create.

"},{"location":"install-olm/#configure-dns-and-tls-integration","title":"Configure DNS and TLS integration","text":"

In this section will build on the previous steps and expand the kustomization.yaml we created in $KUADRANT_DIR/configure.

In order for cert-manager and the Kuadrant DNS operator to be able to access and manage DNS records and setup TLS certificates and provide external connectivity for your endpoints, you need to setup a credential for these components. To do this, we will use a Kubernetes secret via a kustomize secret generator. You can find other example overlays for each supported cloud provider under the configure directory.

An example lets-encrypt certificate issuer is provided, but for more information on certificate issuers take a look at the cert-manager documentation.

Lets modify our existing local kustomize overlay to setup these secrets and the cluster certificate issuer:

First you will need to setup the required .env file specified in the kuztomization.yaml file in the same directory as your existing configure kustomization. Below is an example for AWS:

touch $KUADRANT_DIR/configure/aws-credentials.env\n
Add the following to your new file

AWS_ACCESS_KEY_ID=xxx\nAWS_SECRET_ACCESS_KEY=xxx\nAWS_REGION=eu-west-1\n

With this setup, lets update our configure kustomization to generate the needed secrets. We will also define a TLS ClusterIssuer (see below). The full kustomization.yaml file should look like:

apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n  - cluster-issuer.yaml #(comment if you dont want to use it. The issuer yaml is defined below). Ensure you name the file correctly.\n\n\ngeneratorOptions:\n  disableNameSuffixHash: true\n  labels:\n    app.kubernetes.io/part-of: kuadrant\n    app.kubernetes.io/managed-by: kustomize\n\nsecretGenerator:\n\n  - name: aws-provider-credentials\n    namespace: cert-manager # assumes cert-manager namespace exists.\n    envs:\n      - aws-credentials.env # notice this matches the .env file above. You will need to setup this file locally\n    type: 'kuadrant.io/aws'\n  - name: aws-provider-credentials\n    namespace: gateway-system # this is the namespace where your gateway will be provisioned\n    envs:\n      - aws-credentials.env #notice this matches the .env file above. you need to set up this file locally first. \n    type: 'kuadrant.io/aws'\n

Below is an example Lets-Encrypt Cluster Issuer that uses the aws credential we setup above. Create this in the same directory as the configure kustomization.yaml:

touch $KUADRANT_DIR/configure/cluster-issuer.yaml\n

Add the following to this new file:

# example lets-encrypt cluster issuer that will work with the credentials we will add\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: lets-encrypt-aws\nspec:\n  acme:\n    privateKeySecretRef:\n      name: le-secret\n    server: https://acme-v02.api.letsencrypt.org/directory\n    solvers:\n\n      - dns01:\n          route53:\n            accessKeyIDSecretRef:\n              key: AWS_ACCESS_KEY_ID\n              name: aws-provider-credentials #notice this matches the name of the secret we created.\n            region: us-east-1 #override if needed\n            secretAccessKeySecretRef:\n              key: AWS_SECRET_ACCESS_KEY\n              name: aws-provider-credentials\n

To apply our changes (note this doesn't need to be done in different steps, but is done so here to illustrate how you can build up your configuration of Kuadrant) execute:

kubectl apply -k $KUADRANT_DIR/configure\n

The cluster issuer should become ready:

kubectl wait clusterissuer/lets-encrypt-aws --for=condition=ready=true\n

We create two credentials. One for use with DNSPolicy in the gateway-system namespace and one for use by cert-manager in the cert-manager namespace. With these credentials in place and the cluster issuer configured. You are now ready to start using DNSPolicy and TLSPolicy to secure and connect your Gateways.

"},{"location":"install-olm/#use-an-external-redis","title":"Use an External Redis","text":"

To connect Limitador (the component responsible for rate limiting requests) to redis so that its counters are stored and can be shared with other limitador instances follow these steps:

Again we will build on the kustomization we started. In the same way we did for the cloud provider credentials, we need to setup a redis-credential.env file in the same directory as the kustomization.

touch $KUADRANT_DIR/configure/redis-credentials.env\n

Add the redis connection string to this file in the following format:

URL=redis://xxxx\n

Next we need to add a new secret generator to our existing configure file at $KUADRANT_DIR/configure/kustomization.yaml add it below the other secretGenerators

  - name: redis-credentials\n    namespace: kuadrant-system\n    envs:\n      - redis-credentials.env\n    type: 'kuadrant.io/redis'\n

We also need to patch the existing Limitador resource. Add the following to the $KUADRANT_DIR/configure/kustomization.yaml

patches:\n\n  - patch: |-\n      apiVersion: limitador.kuadrant.io/v1alpha1\n      kind: Limitador\n      metadata:\n        name: limitador\n        namespace: kuadrant-system\n      spec:\n        storage:\n          redis:\n            configSecretRef:\n              name: redis-credentials\n

Your full kustomize.yaml will now be:

apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n  - cluster-issuer.yaml #(comment if you dont want to use it. The issuer yaml is defined below). Ensure you name the file correctly.\n\n\ngeneratorOptions:\n  disableNameSuffixHash: true\n  labels:\n    app.kubernetes.io/part-of: kuadrant\n    app.kubernetes.io/managed-by: kustomize\n\nsecretGenerator:\n\n  - name: aws-provider-credentials\n    namespace: cert-manager # assumes cert-manager namespace exists.\n    envs:\n      - aws-credentials.env # notice this matches the .env file above. You will need to setup this file locally\n    type: 'kuadrant.io/aws'\n  - name: aws-provider-credentials\n    namespace: gateway-system # this is the namespace where your gateway will be provisioned\n    envs:\n      - aws-credentials.env #notice this matches the .env file above. you need to set up this file locally first.\n    type: 'kuadrant.io/aws'\n  - name: redis-credentials\n    namespace: kuadrant-system\n    envs:\n      - redis-credentials.env\n    type: 'kuadrant.io/redis'\n\npatches:\n\n  - patch: |-\n      apiVersion: limitador.kuadrant.io/v1alpha1\n      kind: Limitador\n      metadata:\n        name: limitador\n        namespace: kuadrant-system\n      spec:\n        storage:\n          redis:\n            configSecretRef:\n              name: redis-credentials\n

Re-Apply the configuration to setup the new secret and configuration:

kubectl apply -k $KUADRANT_DIR/configure/\n

Limitador is now configured to use the provided redis connection URL as a data store for rate limit counters. Limitador will become temporarily unavailable as it restarts.

"},{"location":"install-olm/#validate","title":"Validate","text":"

Validate Kuadrant is in a ready state as before:

kubectl get kuadrant kuadrant -n kuadrant-system -o=wide\n\n# NAME       STATUS   AGE\n# kuadrant   Ready    61m\n
"},{"location":"install-olm/#resilient-deployment-of-data-plane-components","title":"Resilient Deployment of data plane components","text":""},{"location":"install-olm/#limitador-topologyconstraints-poddisruptionbudget-and-resource-limits","title":"Limitador: TopologyConstraints, PodDisruptionBudget and Resource Limits","text":"

To set limits, replicas and a PodDisruptionBudget for limitador you can add the following to the existing limitador patch in your local limitador in the $KUADRANT_DIR/configure/kustomize.yaml spec:

pdb:\n  maxUnavailable: 1\nreplicas: 2\nresourceRequirements:\n    requests:\n      cpu: 10m\n      memory: 10Mi # set these based on your own needs.\n

Re-apply the configuration. This will result in two instances of Limitador becoming available and a PodDisruptionBudget being set up:

kubectl apply -k $KUADRANT_DIR/configure/\n

For topology constraints, you will need to patch the limitador deployment directly:

add the below yaml to a limitador-topoloy-patch.yaml file under a $KUADRANT_DIR/configure/patches directory:

mkdir -p $KUADRANT_DIR/configure/patches\ntouch $KUADRANT_DIR/configure/patches/limitador-topoloy-patch.yaml\n
spec:\n  template:\n    spec:\n      topologySpreadConstraints:\n\n        - maxSkew: 1\n          topologyKey: kubernetes.io/hostname\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              limitador-resource: limitador\n        - maxSkew: 1\n          topologyKey: kubernetes.io/zone\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              limitador-resource: limitador\n

Apply this to the existing limitador deployment

kubectl patch deployment limitador-limitador -n kuadrant-system --patch-file $KUADRANT_DIR/configure/patches/limitador-topoloy-patch.yaml\n
"},{"location":"install-olm/#authorino-topologyconstraints-poddisruptionbudget-and-resource-limits","title":"Authorino: TopologyConstraints, PodDisruptionBudget and Resource Limits","text":"

To increase the number of replicas for Authorino add a new patch to the $KUADRANT_DIR/configure/kustomization.yaml

  - patch: |-\n      apiVersion: operator.authorino.kuadrant.io/v1beta1\n      kind: Authorino\n      metadata:\n        name: authorino\n        namespace: kuadrant-system\n      spec:\n        replicas: 2\n

and re-apply the configuration:

kubectl apply -k $KUADRANT_DIR/configure/\n

To add resource limits and or topology constraints to Authorino. You will need to patch the Authorino deployment directly: Add the below yaml to a authorino-topoloy-patch.yaml under the $KUADRANT_DIR/configure/patches directory:

touch $KUADRANT_DIR/configure/patches/authorino-topoloy-patch.yaml\n
spec:\n  template:\n    spec:\n      containers:\n\n        - name: authorino\n          resources:\n            requests:\n              cpu: 10m # set your own needed limits here\n              memory: 10Mi # set your own needed limits here\n      topologySpreadConstraints:\n        - maxSkew: 1\n          topologyKey: kubernetes.io/hostname\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              authorino-resource: authorino\n        - maxSkew: 1\n          topologyKey: kubernetes.io/zone\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              authorino-resource: authorino\n

Apply the patch:

kubectl patch deployment authorino -n kuadrant-system --patch-file $KUADRANT_DIR/configure/patches/authorino-topoloy-patch.yaml\n

Kuadrant is now installed and ready to use and the data plane components are configured to be distributed and resilient.

For reference the full configure kustomization should look like:

kind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n  - cluster-issuer.yaml\ngeneratorOptions:\n  disableNameSuffixHash: true\n  labels:\n    app.kubernetes.io/part-of: kuadrant\n    app.kubernetes.io/managed-by: kustomize\n\nsecretGenerator:\n\n  - name: aws-provider-credentials\n    namespace: cert-manager # assumes cert-manager namespace exists.\n    envs:\n      - aws-credentials.env # notice this matches the .env file above. You will need to setup this file locally\n    type: 'kuadrant.io/aws'\n  - name: aws-provider-credentials\n    namespace: gateway-system # this is the namespace where your gateway will be provisioned\n    envs:\n      - aws-credentials.env #notice this matches the .env file above. you need to set up this file locally first.\n    type: 'kuadrant.io/aws'\n  - name: redis-credentials\n    namespace: kuadrant-system\n    envs:\n      - redis-credentials.env\n    type: 'kuadrant.io/redis'\n\npatches:\n\n  - patch: |-\n      apiVersion: limitador.kuadrant.io/v1alpha1\n      kind: Limitador\n      metadata:\n        name: limitador\n        namespace: kuadrant-system\n      spec:\n        pdb:\n          maxUnavailable: 1\n        replicas: 2\n        resourceRequirements:\n          requests:\n            cpu: 10m\n            memory: 10Mi # set these based on your own needs.\n        storage:\n          redis:\n            configSecretRef:\n              name: redis-credentials\n  - patch: |-\n      apiVersion: operator.authorino.kuadrant.io/v1beta1\n      kind: Authorino\n      metadata:\n        name: authorino\n        namespace: kuadrant-system\n      spec:\n        replicas: 2\n
The configure directory should contain the following:

configure/\n\u251c\u2500\u2500 aws-credentials.env\n\u251c\u2500\u2500 cluster-issuer.yaml\n\u251c\u2500\u2500 kustomization.yaml\n\u251c\u2500\u2500 patches\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 authorino-topoloy-patch.yaml\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 limitador-topoloy-patch.yaml\n\u2514\u2500\u2500 redis-credentials.env\n
"},{"location":"install-olm/#set-up-observability-openshift-only","title":"Set up observability (OpenShift Only)","text":"

Verify that user workload monitoring is enabled in your OpenShift cluster. If it is not enabled, check the OpenShift documentation for how to do this.

kubectl get configmap cluster-monitoring-config -n openshift-monitoring -o jsonpath='{.data.config\\.yaml}'|grep enableUserWorkload\n# (expected output)\n# enableUserWorkload: true\n

Install the gateway & Kuadrant metrics components and configuration, including Grafana.

# change the version as needed\nkubectl apply -k https://github.com/Kuadrant/kuadrant-operator//config/install/configure/observability?ref=v1.0.1\n

Configure the Openshift thanos-query instance as a data source in Grafana.

TOKEN=\"Bearer $(oc whoami -t)\"\nHOST=\"$(kubectl -n openshift-monitoring get route thanos-querier -o jsonpath='https://{.status.ingress[].host}')\"\necho \"TOKEN=$TOKEN\" > config/observability/openshift/grafana/datasource.env\necho \"HOST=$HOST\" >> config/observability/openshift/grafana/datasource.env\nkubectl apply -k config/observability/openshift/grafana\n

Create the example dashboards in Grafana

kubectl apply -k https://github.com/Kuadrant/kuadrant-operator//examples/dashboards?ref=v1.0.1\n

Access the Grafana UI, using the default user/pass of root/secret. You should see the example dashboards in the 'monitoring' folder. For more information on the example dashboards, check out the documentation.

kubectl -n monitoring get routes grafana-route -o jsonpath=\"https://{.status.ingress[].host}\"\n
"},{"location":"install-olm/#next-steps","title":"Next Steps","text":"
  • Try out our Secure, protect, and connect guide
"},{"location":"kuadrant-operator/","title":"Kuadrant Operator","text":""},{"location":"kuadrant-operator/#overview","title":"Overview","text":"

Kuadrant leverages Gateway API and Policy Attachment to enhance gateway providers like Istio and Envoy Gateway with additional features via Policies. Those features include TLS, DNS, application authentication & authorization, and rate limiting.

You can find more information on the different aspects of Kuadrant at the documentation links below:

  • Overview
  • Getting Started & Installation
  • Architecture
"},{"location":"kuadrant-operator/#contributing","title":"Contributing","text":"

The Development guide describes how to build the kuadrant operator and how to test your changes before submitting a patch or opening a PR.

Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.

"},{"location":"kuadrant-operator/#licensing","title":"Licensing","text":"

This software is licensed under the Apache 2.0 license.

See the LICENSE and NOTICE files that should have been provided along with this software for details.

"},{"location":"kuadrant-operator/doc/install/install-make/","title":"Installing Kuadrant via make targets","text":""},{"location":"kuadrant-operator/doc/install/install-make/#overview","title":"Overview","text":"

The following doc will show you how to install the Kuadrant Operator using make targets in the Kuadrant operator repo. What will be installed is Istio, Kubernetes Gateway API and Kuadrant itself.

Note: In a production environment, these steps are usually performed by a cluster operator with administrator privileges over the Kubernetes cluster.

"},{"location":"kuadrant-operator/doc/install/install-make/#pre-requisites","title":"Pre-requisites","text":"
  • Kind
  • Docker or Podman
"},{"location":"kuadrant-operator/doc/install/install-make/#setup","title":"Setup","text":"

Clone the project:

git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n

Set up the environment (this will also create a kind cluster; if you're using Podman, use the env var CONTAINER_ENGINE=podman with the make target below):

make local-setup\n

Request an instance of Kuadrant:

kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n  name: kuadrant\nspec: {}\nEOF\n

"},{"location":"kuadrant-operator/doc/install/mtls-configuration/","title":"Configure mTLS between the Gateway and Kuadrant components","text":""},{"location":"kuadrant-operator/doc/install/mtls-configuration/#overview","title":"Overview","text":"

This guide includes manual steps to enable mTLS between an Istio provided gateway and the Kuadrant components. If you use an AuthPolicy or RateLimitPolicy, there will be communication between the gateway and the respective Kuadrant components at request time. This communication happens between the Wasm plugin in Envoy proxy, and Authorino or Limitador. At the time of writing there is an RFC discussing how to add mTLS capabilities as a feature of the Kuadrant operator. If you are interested in having that feature or influencing how it is delivered, please engage on that pull request.

Note

This method currently only works if the Gateway is provided by Istio, with service mesh capabilities enabled across the cluster. For example, the Istio CNI agent is running on each node.

"},{"location":"kuadrant-operator/doc/install/mtls-configuration/#prerequisites","title":"Prerequisites","text":"
  • You have installed Kuadrant in a Kubernetes cluster.
  • Additionally, you have at least 1 AuthPolicy or RateLimitPolicy attached to your Gateway or HTTPRoute.
"},{"location":"kuadrant-operator/doc/install/mtls-configuration/#enabling-mtls","title":"Enabling mTLS","text":""},{"location":"kuadrant-operator/doc/install/mtls-configuration/#kuadrant-components","title":"Kuadrant components","text":"

As the Kuadrant components (Authorino & Limitador) are already part of the service mesh in Istio, mTLS can be enabled after an Envoy proxy sidecar is deployed alongside them. To do this, apply the Istio sidecar label to both Deployment templates.

kubectl -n kuadrant-system patch deployment authorino \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"true\"}}}}}'\n\nkubectl -n kuadrant-system patch deployment limitador-limitador \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"true\"}}}}}'\n

You should see the number of containers in either pod increase from 1 to 2, as the istio-proxy is added to the pods. This change will force all traffic to those pods to go through the proxy. However, mTLS is not enabled yet.

"},{"location":"kuadrant-operator/doc/install/mtls-configuration/#envoy-filter","title":"Envoy Filter","text":"

The next step enables mTLS for traffic originating in the gateway (where the Wasm plugin executes), going to the Kuadrant components. This requires modifying the EnvoyFilters directly.

Note

Any changes to the EnvoyFilters may be reverted by the Kuadrant operator when related resources like Gateways, HTTPRoutes or policies are modified. It is recommended to automate the next step, for example via a job or GitOps controller, to ensure the changes persist.

The EnvoyFilter resources will typically have a name prefix of kuadrant- in the same namespace as your Gateway. Add the snippet below to the spec.configPatches[].patch.value value in each EnvoyFilter.

        transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n            common_tls_context:\n              tls_certificate_sds_secret_configs:\n\n              - name: default\n                sds_config:\n                  api_config_source:\n                    api_type: GRPC\n                    grpc_services:\n                    - envoy_grpc:\n                        cluster_name: sds-grpc\n              validation_context_sds_secret_config:\n                name: ROOTCA\n                sds_config:\n                  api_config_source:\n                    api_type: GRPC\n                    grpc_services:\n                    - envoy_grpc:\n                        cluster_name: sds-grpc\n

The envoy.transport_sockets.tls transport socket name tells Envoy to use the built-in TLS transport socket, enabling TLS encryption. The @type specifies that the configuration follows the UpstreamTlsContext message from Envoy's TLS transport socket extension. This is used for client-side TLS settings. The tls_certificate_sds_secret_configs configures Envoy to obtain client certificates and private keys via the Secret Discovery Service (SDS) over GRPC. The validation_context_sds_secret_config configures Envoy to obtain the root CA certificates via SDS (over GRPC) to validate the server's certificate.

"},{"location":"kuadrant-operator/doc/install/mtls-configuration/#istio-configuration","title":"Istio configuration","text":"

The last step is to ensure Authorino and Limitador are configured to require and accept mTLS connections. In Istio, this is done by creating a PeerAuthentication resource where the mtls mode is set to STRICT. The below command will enable STRICT mode on all pods with Istio sidecar injection in the kuadrant-system namespace.

kubectl apply -f - <<EOF\napiVersion: security.istio.io/v1\nkind: PeerAuthentication\nmetadata:\n  name: default\n  namespace: kuadrant-system\nspec:\n  mtls:\n    mode: STRICT\nEOF\n

If you prefer to only enable mTLS for a specific component, you can modify just the EnvoyFilter and Deployment for that component. Then, when creating the PeerAuthentication resource, you can be more specific about which pods the mTLS mode applies to. For example, the following resource would enable STRICT mode just for the Limitador component.

apiVersion: security.istio.io/v1\nkind: PeerAuthentication\nmetadata:\n  name: limitador-mtls\n  namespace: kuadrant-system\nspec:\n  selector:\n    matchLabels:\n      app: limitador\n  mtls:\n    mode: STRICT\n
"},{"location":"kuadrant-operator/doc/install/mtls-configuration/#disabling-mtls","title":"Disabling mTLS","text":"

To disable mTLS, remove the transport_socket changes from any EnvoyFilters. Then you can either set the mTLS mode to PERMISSIVE in the PeerAuthentication resource:

kubectl patch peerauthentication default -n kuadrant-system --type='merge' -p '{\"spec\":{\"mtls\":{\"mode\":\"PERMISSIVE\"}}}'\n

Or delete the resource:

kubectl delete peerauthentication -n kuadrant-system default\n

You don't have to remove the sidecar from the Kuadrant components, but it is safe to do so by removing the sidecar.istio.io/inject label:

kubectl -n kuadrant-system patch deployment authorino \\\n  --type='json' \\\n  -p='[{\"op\": \"remove\", \"path\": \"/spec/template/metadata/labels/sidecar.istio.io~1inject\"}]'\n\nkubectl -n kuadrant-system patch deployment limitador-limitador \\\n  --type='json' \\\n  -p='[{\"op\": \"remove\", \"path\": \"/spec/template/metadata/labels/sidecar.istio.io~1inject\"}]'\n

Or set the value to false:

kubectl -n kuadrant-system patch deployment authorino \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"false\"}}}}}'\n\n\nkubectl -n kuadrant-system patch deployment limitador-limitador \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"false\"}}}}}'\n
"},{"location":"kuadrant-operator/doc/observability/examples/","title":"Example Dashboards and Alerts","text":"

Explore a variety of starting points for monitoring your Kuadrant installation with our examples folder. These dashboards and alerts are ready-to-use and easily customizable to fit your environment.

There are some example dashboards uploaded to Grafana.com . You can use the ID's listed below to import these dashboards into Grafana:

Name ID App Developer Dashboard 21538 Business User Dashboard 20981 Platform Engineer Dashboard 20982 DNS Operator Dashboard 22695"},{"location":"kuadrant-operator/doc/observability/examples/#dashboards","title":"Dashboards","text":""},{"location":"kuadrant-operator/doc/observability/examples/#importing-dashboards-into-grafana","title":"Importing Dashboards into Grafana","text":"

For more details on how to import dashboards into Grafana, visit the import dashboards page.

  • UI Method:
    • JSON - Use the 'Import' feature in the Grafana UI to upload dashboard JSON files directly.
    • ID - Use the 'Import' feature in the Grafana UI to import via Grafana.com using a Dashboard ID.
  • ConfigMap Method: Automate dashboard provisioning by adding files to a ConfigMap, which should be mounted at /etc/grafana/provisioning/dashboards.

Datasources are configured as template variables, automatically integrating with your existing data sources. Metrics for these dashboards are sourced from Prometheus. For more details on the metrics used, visit the metrics documentation page.

"},{"location":"kuadrant-operator/doc/observability/examples/#alerts","title":"Alerts","text":""},{"location":"kuadrant-operator/doc/observability/examples/#setting-up-alerts-in-prometheus","title":"Setting Up Alerts in Prometheus","text":"

You can integrate the example alerts into Prometheus as PrometheusRule resources. Feel free to adjust alert thresholds to suit your specific operational needs.

Additionally, Service Level Objective (SLO) alerts generated with Sloth are included. A benefit of these alerts is the ability to integrate them with this SLO dashboard, which utilizes generated labels to comprehensively overview your SLOs.

Further information on the metrics used for these alerts can be found on the metrics page.

"},{"location":"kuadrant-operator/doc/observability/metrics/","title":"Metrics","text":"

This is a reference page for some of the different metrics used in example dashboards and alerts. It is not an exhaustive list. The documentation for each component may provide more details on a per-component basis. Some of the metrics are sourced from components outside the Kuadrant project, for example, Envoy. The value of this reference is showing some of the more widely desired metrics, and how to join the metrics from different sources together in a meaningful way.

"},{"location":"kuadrant-operator/doc/observability/metrics/#metrics-sources","title":"Metrics sources","text":"
  • Kuadrant components
  • Istio
  • Envoy
  • Kube State Metrics
  • Gateway API State Metrics
  • Kubernetes metrics
"},{"location":"kuadrant-operator/doc/observability/metrics/#resource-usage-metrics","title":"Resource usage metrics","text":"

Resource metrics, like CPU, memory and disk usage, primarily come from the Kubernetes metrics components. These include container_cpu_usage_seconds_total, container_memory_working_set_bytes and kubelet_volume_stats_used_bytes. A stable list of metrics is maintained in the Kubernetes repository. These low-level metrics typically have a set of recording rules that aggregate values by labels and time ranges. For example, node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate or namespace_workload_pod:kube_pod_owner:relabel. If you have deployed the kube-prometheus project, you should have the majority of these metrics being scraped.

"},{"location":"kuadrant-operator/doc/observability/metrics/#networking-metrics","title":"Networking metrics","text":"

Low-level networking metrics like container_network_receive_bytes_total are also available from the Kubernetes metrics components. HTTP & GRPC traffic metrics with higher level labels are available from Istio. One of the main metrics would be istio_requests_total, which is a counter incremented for every request handled by an Istio proxy. Latency metrics are available via the istio_request_duration_milliseconds metric, with buckets for varying response times.

Some example dashboards have panels that make use of the request URL path. The path is not added as a label to Istio metrics by default, as it has the potential to increase metric cardinality, and thus storage requirements. If you want to make use of the path in your queries or visualisations, you can enable the request path metric via the Telemetry resource in istio:

apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n  name: namespace-metrics\n  namespace: gateway-system\nspec:\n  metrics:\n\n  - providers:\n    - name: prometheus\n    overrides:\n    - match:\n        metric: REQUEST_COUNT\n      tagOverrides:\n        request_url_path:\n          value: \"request.url_path\"\n    - match:      \n        metric: REQUEST_DURATION\n      tagOverrides:\n        request_url_path:\n          value: \"request.url_path\"\n
"},{"location":"kuadrant-operator/doc/observability/metrics/#state-metrics","title":"State metrics","text":"

The kube-state-metrics project exposes the state of various Kubernetes resources as metrics and labels. For example, the ready status of a Pod is available as kube_pod_status_ready, with labels for the pod name and namespace. This can be useful for linking lower level container metrics back to a meaningful resource in the Kubernetes world.

"},{"location":"kuadrant-operator/doc/observability/metrics/#joining-metrics","title":"Joining metrics","text":"

Metric queries can be as simple as just the name of the metric, or can be complex with joining & grouping. A lot of the time it can be useful to tie back low level metrics to more meaningful Kubernetes resources. For example, if the memory usage is maxed out on a container and that container is constantly being OOMKilled, it can be useful to get the Deployment and Namespace of that container for debugging. Prometheus query language (or promql) allows vector matching of results (sometimes called joining).

When using Gateway API and Kuadrant resources like HTTPRoute and RateLimitPolicy, the state metrics can be joined to Istio metrics to give a meaningful result set. Here's an example that queries the number of requests per second, and includes the name of the HTTPRoute that the traffic is for.

sum(\n    rate(\n        istio_requests_total{}[5m]\n    )\n) by (destination_service_name)\n\n\n* on(destination_service_name) group_right \n    label_replace(gatewayapi_httproute_labels{}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")\n

Breaking this query down, there are 2 parts. The first part is getting the rate of requests hitting the Istio gateway, aggregated to 5m intervals:

sum(\n    rate(\n        istio_requests_total{}[5m]\n    )\n) by (destination_service_name)\n

The result set here will include a label for the destination service name (i.e. the Service in Kubernetes). This label is key to looking up the HTTPRoute this traffic belongs to.

The 2nd part of the query uses the gatewayapi_httproute_labels metric and the label_replace function. The gatewayapi_httproute_labels metric gives a list of all httproutes, including any labels on them. The HTTPRoute in this example has a label called 'service', set to be the same as the Istio service name. This allows us to join the 2 result sets. However, because the label doesn't match exactly (destination_service_name and service), we can replace the label so that it does match. That's what the label_replace does.

    label_replace(gatewayapi_httproute_labels{}, \"destination_service_name\", \"$1\",\"service\", \"(.+)\")\n

The 2 parts are joined together using vector matching.

* on(destination_service_name) group_right \n
  • * is the binary operator i.e. multiplication (gives join like behaviour)
  • on() specifies which labels to \"join\" the 2 results with
  • group_right enables a one to many matching.

See the Prometheus documentation for further details on matching.

"},{"location":"kuadrant-operator/doc/observability/tracing/","title":"Enabling tracing with a central collector","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#introduction","title":"Introduction","text":"

This guide outlines the steps to enable tracing in Istio and Kuadrant components (Authorino and Limitador), directing traces to a central collector for improved observability and troubleshooting. We'll also explore a typical troubleshooting flow using traces and logs.

"},{"location":"kuadrant-operator/doc/observability/tracing/#prerequisites","title":"Prerequisites","text":"
  • A Kubernetes cluster with Istio and Kuadrant installed.
  • A trace collector (e.g., Jaeger or Tempo) configured to support OpenTelemetry (OTel).
"},{"location":"kuadrant-operator/doc/observability/tracing/#configuration-steps","title":"Configuration Steps","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#istio-tracing-configuration","title":"Istio Tracing Configuration","text":"

Enable tracing in Istio by using the Telemetry API. Depending on your method for installing Istio, you will need to configure a tracing extensionProvider in your MeshConfig, Istio or IstioOperator resource as well. Here is an example Telemetry and Istio config to sample 100% of requests, if using the Istio Sail Operator.

apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n  name: mesh-default\n  namespace: gateway-system\nspec:\n  tracing:\n\n  - providers:\n    - name: tempo-otlp\n    randomSamplingPercentage: 100\n---\napiVersion: operator.istio.io/v1alpha1\nkind: Istio\nmetadata:\n  name: default\nspec:\n  namespace: gateway-system\n  values:\n    meshConfig:\n      defaultConfig:\n        tracing: {}\n      enableTracing: true\n      extensionProviders:\n      - name: tempo-otlp\n        opentelemetry:\n          port: 4317\n          service: tempo.tempo.svc.cluster.local\n

Important:

The OpenTelemetry collector protocol should be explicitly set in the service port name or appProtocol fields as per the Istio documentation. For example, when using gRPC, the port name should begin with grpc- or the appProtocol should be grpc.

"},{"location":"kuadrant-operator/doc/observability/tracing/#kuadrant-tracing-configuration","title":"Kuadrant Tracing Configuration","text":"

The Authorino and Limitador components have request tracing capabilities. Here is an example configuration to enable and send traces to a central collector. Ensure the collector is the same one that Istio is sending traces so that they can be correlated later.

apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  tracing:\n    endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n    insecure: true\n---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador\nspec:\n  tracing:\n    endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n

Once the changes are applied, the authorino and limitador components will be redeployed with tracing enabled.

Note:

There are plans to consolidate the tracing configuration to a single location i.e. the Kuadrant CR. This will eventually eliminate the need to configure tracing in both the Authorino and Limitador CRs.

Important:

Currently, trace IDs do not propagate to wasm modules in Istio/Envoy, affecting trace continuity in Limitador. This means that requests passed to limitador will not have the relevant 'parent' trace ID in its trace information. If however the trace initiation point is outside of Envoy/Istio, the 'parent' trace ID will be available to limitador and included in traces passed to the collector. This has an impact on correlating traces from limitador with traces from authorino, the gateway and any other components in the path of requests.

"},{"location":"kuadrant-operator/doc/observability/tracing/#troubleshooting-flow-using-traces-and-logs","title":"Troubleshooting Flow Using Traces and Logs","text":"

Using a tracing interface like the Jaeger UI or Grafana, you can search for trace information by the trace ID. You may get the trace ID from logs, or from a header in a sample request you want to troubleshoot. You can also search for recent traces, filtering by the service you want to focus on.

Here is an example trace in the Grafana UI showing the total request time from the gateway (Istio), the time to check the current rate limit count (and update it) in limitador and the time to check auth in Authorino:

In limitador, it is possible to enable request logging with trace IDs to get more information on requests. This requires the log level to be increased to at least debug, so the verbosity must be set to 3 or higher in the Limitador CR. For example:

apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador\nspec:\n  verbosity: 3\n

A log entry will look something like this, with the traceparent field holding the trace ID:

\"Request received: Request { metadata: MetadataMap { headers: {\"te\": \"trailers\", \"grpc-timeout\": \"5000m\", \"content-type\": \"application/grpc\", \"traceparent\": \"00-4a2a933a23df267aed612f4694b32141-00f067aa0ba902b7-01\", \"x-envoy-internal\": \"true\", \"x-envoy-expected-rq-timeout-ms\": \"5000\"} }, message: RateLimitRequest { domain: \"default/toystore\", descriptors: [RateLimitDescriptor { entries: [Entry { key: \"limit.general_user__f5646550\", value: \"1\" }, Entry { key: \"metadata.filter_metadata.envoy\\\\.filters\\\\.http\\\\.ext_authz.identity.userid\", value: \"alice\" }], limit: None }], hits_addend: 1 }, extensions: Extensions }\"\n

If you centrally aggregate logs using something like promtail and loki, you can jump between trace information and the relevant logs for that service:

Using a combination of tracing and logs, you can visualise and troubleshoot request timing issues and drill down to specific services. This method becomes even more powerful when combined with metrics and dashboards to get a more complete picture of your users' traffic.

"},{"location":"kuadrant-operator/doc/overviews/auth/","title":"Kuadrant Auth","text":"

A Kuadrant AuthPolicy custom resource:

  1. Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to enforce auth.
  2. Supports targeting subsets (sections) of a network resource to apply the auth rules to.
  3. Abstracts the details of the underlying external authorization protocol and configuration resources, that have a much broader remit and surface area.
  4. Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
"},{"location":"kuadrant-operator/doc/overviews/auth/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/overviews/auth/#envoys-external-authorization-protocol","title":"Envoy's External Authorization Protocol","text":"

Kuadrant's Auth implementation relies on the Envoy's External Authorization protocol. The workflow per request goes:

  1. On incoming request, the gateway checks the matching rules for enforcing the auth rules, as stated in the AuthPolicy custom resources and targeted Gateway API networking objects
  2. If the request matches, the gateway sends one CheckRequest to the external auth service (\"Authorino\").
  3. The external auth service responds with a CheckResponse back to the gateway with either an OK or DENIED response code.

An AuthPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external auth service.

"},{"location":"kuadrant-operator/doc/overviews/auth/#the-authpolicy-custom-resource","title":"The AuthPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/auth/#overview","title":"Overview","text":"

The AuthPolicy spec includes the following parts:

  • A reference to an existing Gateway API resource (spec.targetRef)
  • Authentication/authorization scheme (spec.rules)
  • Top-level additional conditions (spec.when)
  • List of named patterns (spec.patterns)

The auth scheme specifies rules for:

  • Authentication (spec.rules.authentication)
  • External auth metadata fetching (spec.rules.metadata)
  • Authorization (spec.rules.authorization)
  • Custom response items (spec.rules.response)
  • Callbacks (spec.rules.callbacks)

Each auth rule can declare specific when conditions for the rule to apply.

The auth scheme (rules), as well as conditions and named patterns, can be declared at the top level of the spec (with the semantics of defaults) or alternatively within explicit defaults or overrides blocks.

Check out the API reference for a full specification of the AuthPolicy CRD.

"},{"location":"kuadrant-operator/doc/overviews/auth/#using-the-authpolicy","title":"Using the AuthPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/auth/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"

When an AuthPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs field of the HTTPRoute.

The targeted HTTPRoute's rules and/or hostnames to which the policy must be enforced can be filtered to specific subsets.

Target a HTTPRoute by setting the spec.targetRef field of the AuthPolicy as follows:

apiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: my-route-auth\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: <HTTPRoute Name>\n  rules: { \u2026 }\n
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510             \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502             \u2502   (App namespace)  \u2502\n\u2502                   \u2502             \u2502                    \u2502\n\u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u2502  parentRefs \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510     \u2502\n\u2502  \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502     \u2502\n\u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518     \u2502\n\u2502                   \u2502             \u2502        \u25b2           \u2502\n\u2502                   \u2502             \u2502        \u2502           \u2502\n\u2502                   \u2502             \u2502        \u2502           \u2502\n\u2502                   \u2502             \u2502        \u2502 targetRef \u2502\n\u2502                   \u2502             \u2502        \u2502           \u2502\n\u2502                   \u2502             \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502\n\u2502                   \u2502             \u2502  \u2502 AuthPolicy \u2502    \u2502\n\u2502                   \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502\n\u2502                   \u2502             \u2502                    
\u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/overviews/auth/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"

If an AuthPolicy targets a route defined for *.com and another AuthPolicy targets another route for api.com, the Kuadrant control plane will not merge these two AuthPolicies. Rather, it will mimic the behavior of gateway implementation by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and auth rules.

E.g., a request coming for api.com will be protected according to the rules from the AuthPolicy that targets the route for api.com; while a request for other.com will be protected with the rules from the AuthPolicy targeting the route for *.com.

Example with 3 AuthPolicies and 3 HTTPRoutes:

  • AuthPolicy A \u2192 HTTPRoute A (a.toystore.com)
  • AuthPolicy B \u2192 HTTPRoute B (b.toystore.com)
  • AuthPolicy W \u2192 HTTPRoute W (*.toystore.com)

Expected behavior:

  • Request to a.toystore.com \u2192 AuthPolicy A will be enforced
  • Request to b.toystore.com \u2192 AuthPolicy B will be enforced
  • Request to other.toystore.com \u2192 AuthPolicy W will be enforced
"},{"location":"kuadrant-operator/doc/overviews/auth/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

An AuthPolicy that targets a Gateway can declare a block of defaults (spec.defaults) or a block of overrides (spec.overrides). As a standard, gateway policies that specify neither defaults nor overrides act as defaults.

When declaring defaults, an AuthPolicy which targets a Gateway will be enforced on all HTTP traffic hitting the gateway, unless a more specific AuthPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referencing the gateway as parent will be automatically covered by the default AuthPolicy, as will changes to the existing HTTPRoutes.

Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempts, such as by setting preemptive \"deny-all\" policies on hostnames and hostname wildcards.

Inversely, a gateway policy that specifies overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.

Target a Gateway by setting the spec.targetRef field of the AuthPolicy as follows:

apiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: my-gw-auth\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n  defaults: # alternatively: `overrides`\n    rules: { \u2026 }\n
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510             \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502             \u2502   (App namespace)  \u2502\n\u2502                   \u2502             \u2502                    \u2502\n\u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u2502  parentRefs \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510     \u2502\n\u2502  \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502     \u2502\n\u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518     \u2502\n\u2502       \u25b2           \u2502             \u2502        \u25b2           \u2502\n\u2502       \u2502           \u2502             \u2502        \u2502           \u2502\n\u2502       \u2502           \u2502             \u2502        \u2502           \u2502\n\u2502       \u2502 targetRef \u2502             \u2502        \u2502 targetRef \u2502\n\u2502       \u2502           \u2502             \u2502        \u2502           \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502             \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502\n\u2502 \u2502 AuthPolicy \u2502    \u2502             \u2502  \u2502 AuthPolicy \u2502    \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    
\u2502\n\u2502                   \u2502             \u2502                    \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/overviews/auth/#overlapping-gateway-and-httproute-authpolicies","title":"Overlapping Gateway and HTTPRoute AuthPolicies","text":"

Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.

Gateway AuthPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute AuthPolicy exists, in which case the HTTPRoute AuthPolicy prevails.

Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoute and 2 Gateways without AuthPolicies attached):

  • AuthPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
  • AuthPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
  • AuthPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
  • AuthPolicy G (defaults) \u2192 Gateway G (*.com)

Expected behavior:

  • Request to a.toystore.com \u2192 AuthPolicy A will be enforced
  • Request to b.toystore.com \u2192 AuthPolicy B will be enforced
  • Request to other.toystore.com \u2192 AuthPolicy W will be enforced
  • Request to other.com (suppose a route exists) \u2192 AuthPolicy G will be enforced
  • Request to yet-another.net (suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced

Gateway AuthPolicies that declare overrides protect all traffic routed through the gateway, regardless of existence of any more specific HTTPRoute AuthPolicy.

Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoute and 2 Gateways without AuthPolicies attached):

  • AuthPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
  • AuthPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
  • AuthPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
  • AuthPolicy G (overrides) \u2192 Gateway G (*.com)

Expected behavior:

  • Request to a.toystore.com \u2192 AuthPolicy G will be enforced
  • Request to b.toystore.com \u2192 AuthPolicy G will be enforced
  • Request to other.toystore.com \u2192 AuthPolicy G will be enforced
  • Request to other.com (suppose a route exists) \u2192 AuthPolicy G will be enforced
  • Request to yet-another.net (suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced
"},{"location":"kuadrant-operator/doc/overviews/auth/#when-conditions","title":"when conditions","text":"

when conditions can be used to scope an AuthPolicy or auth rule within an AuthPolicy (i.e. to filter the traffic to which a policy or policy rule applies) without any coupling to the underlying network topology.

Use when conditions to conditionally activate policies and policy rules based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames and spec.rules.matches fields, or in general in AuthPolicies that target a Gateway.

when conditions in an AuthPolicy are compatible with Authorino conditions, thus supporting complex boolean expressions with AND and OR operators, as well as grouping.

The selectors within the when conditions of an AuthPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.

Authorino JSON path string modifiers can also be applied to the selectors within the when conditions of an AuthPolicy.

"},{"location":"kuadrant-operator/doc/overviews/auth/#examples","title":"Examples","text":"

Check out the following user guides for examples of protecting services with Kuadrant:

  • Enforcing authentication & authorization with Kuadrant AuthPolicy, for app developers and platform engineers
  • Authenticated Rate Limiting for Application Developers
  • Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/doc/overviews/auth/#known-limitations","title":"Known limitations","text":"
  • One HTTPRoute can only be targeted by one AuthPolicy.
  • One Gateway can only be targeted by one AuthPolicy.
  • AuthPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the AuthPolicy.
  • 2+ AuthPolicies cannot target network resources that define/inherit the same exact hostname.
"},{"location":"kuadrant-operator/doc/overviews/auth/#limitation-multiple-network-resources-with-identical-hostnames","title":"Limitation: Multiple network resources with identical hostnames","text":"

Kuadrant currently does not support multiple AuthPolicies simultaneously targeting network resources that declare identical hostnames. This includes multiple HTTPRoutes that specify the same hostnames in the spec.hostnames field, as well as HTTPRoutes that specify a hostname that is identical to a hostname specified in a listener of one of the route's parent gateways or HTTPRoutes that don't specify any hostname at all thus inheriting the hostnames from the parent gateways. In any of these cases, a maximum of one AuthPolicy targeting any of those resources that specify identical hostnames is allowed.

Moreover, having multiple resources that declare identical hostnames may lead to unexpected behavior and therefore should be avoided.

This limitation is rooted at the underlying components configured by Kuadrant for the implementation of its policies and the lack of information in the data plane regarding the exact route that is honored by the API gateway at each specific request, in cases of conflicting hostnames.

To exemplify one way this limitation can impact deployments, consider the following topology:

                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                 \u2502   Gateway    \u2502\n                 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n          \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners:   \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n          \u2502      \u2502 - host: *.io \u2502       \u2502\n          \u2502      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518       \u2502\n          \u2502                             \u2502\n          \u2502                             \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502     HTTPRoute     \u2502        \u2502     HTTPRoute     \u2502\n\u2502     (route-a)     \u2502        \u2502     (route-b)     \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524        \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames:        \u2502        \u2502 hostnames:        \u2502\n\u2502 - app.io          \u2502        \u2502 - app.io          \u2502\n\u2502 rules:            \u2502        \u2502 rules:            \u2502\n\u2502 - matches:        \u2502        \u2502 - matches:        \u2502\n\u2502   - path:         \u2502        \u2502   - path:         \u2502\n\u2502       value: /foo \u2502        \u2502       value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518        
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n          \u25b2                            \u25b2\n          \u2502                            \u2502\n    \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510               \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n    \u2502 AuthPolicy \u2502               \u2502 AuthPolicy \u2502\n    \u2502 (policy-1) \u2502               \u2502 (policy-2) \u2502\n    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518               \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

In the example above, with the policy-1 resource created before policy-2, policy-1 will be enforced on all requests to app.io/foo while policy-2 will be rejected. I.e. app.io/bar will not be secured. In fact, the status conditions of policy-2 shall reflect Enforced=false with message \"AuthPolicy has encountered some issues: AuthScheme is not ready yet\".

Notice the enforcement of policy-1 and no enforcement of policy-2 is the opposite behavior as the analogous problem with the Kuadrant RateLimitPolicy.

A slightly different way the limitation applies is when two or more routes of a gateway declare the exact same hostname and a gateway policy is defined with expectation to set default rules for the cases not covered by more specific policies. E.g.:

                                    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                         \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 AuthPolicy \u2502\n                         \u2502          \u2502 (policy-2) \u2502\n                         \u25bc          \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                 \u2502   Gateway    \u2502\n                 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n          \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners:   \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n          \u2502      \u2502 - host: *.io \u2502       \u2502\n          \u2502      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518       \u2502\n          \u2502                             \u2502\n          \u2502                             \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502     HTTPRoute     \u2502        \u2502     HTTPRoute     \u2502\n\u2502     (route-a)     \u2502        \u2502     (route-b)     \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524        \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames:        \u2502        \u2502 hostnames:        \u2502\n\u2502 - app.io          \u2502        \u2502 - app.io          \u2502\n\u2502 rules:            \u2502        \u2502 
rules:            \u2502\n\u2502 - matches:        \u2502        \u2502 - matches:        \u2502\n\u2502   - path:         \u2502        \u2502   - path:         \u2502\n\u2502       value: /foo \u2502        \u2502       value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518        \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n          \u25b2\n          \u2502\n    \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n    \u2502 AuthPolicy \u2502\n    \u2502 (policy-1) \u2502\n    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

Once again, requests to app.io/foo will be protected under AuthPolicy policy-1, while requests to app.io/bar will not be protected under any policy at all, contrary to the expectation that gateway policy policy-2 would be enforced as the default. Both policies will report status condition as Enforced nonetheless.

To avoid these problems, use different hostnames in each route.

"},{"location":"kuadrant-operator/doc/overviews/auth/#implementation-details","title":"Implementation details","text":"

Under the hood, for each AuthPolicy, Kuadrant creates an Istio AuthorizationPolicy and an Authorino AuthConfig custom resources.

Only requests that match the rules in the Istio AuthorizationPolicy cause an authorization request to be sent to the external authorization service (\"Authorino\"), i.e., only requests directed to the HTTPRouteRules targeted by the AuthPolicy (directly or indirectly), according to the declared top-level route selectors (if present), or all requests for which a matching HTTPRouteRule exists (otherwise).

Authorino looks up for the auth scheme (AuthConfig custom resource) to enforce using the provided hostname of the original request as key. It then checks again if the request matches at least one of the selected HTTPRouteRules, in which case it enforces the auth scheme.

Exception to the rule Due to limitations imposed by the Istio `AuthorizationPolicy`, there are a few patterns of HTTPRouteRules that cannot be translated to filters for the external authorization request. Therefore, the following patterns used in HTTPRouteMatches of top-level route selectors of an AuthPolicy will not be included in the Istio AuthorizationPolicy rules that trigger the check request with Authorino: `PathMatchRegularExpression`, `HeaderMatchRegularExpression`, and `HTTPQueryParamMatch`. As a consequence to the above, requests that do not match these rules and otherwise would not be checked with Authorino will result in a request to the external authorization service. Authorino nonetheless will still verify those patterns and ensure the auth scheme is enforced only when it matches a selected HTTPRouteRule. Users of Kuadrant may observe an unnecessary call to the authorization service in those cases where the request is out of the scope of the AuthPolicy and therefore always authorized."},{"location":"kuadrant-operator/doc/overviews/auth/#internal-custom-resources-and-namespaces","title":"Internal custom resources and namespaces","text":"

While the Istio AuthorizationPolicy needs to be created in the same namespace as the gateway workload, the Authorino AuthConfig is created in the namespace of the AuthPolicy itself. This allows to simplify references such as to Kubernetes Secrets referred in the AuthPolicy, as well as the RBAC to support the architecture.

"},{"location":"kuadrant-operator/doc/overviews/development/","title":"Development Guide","text":""},{"location":"kuadrant-operator/doc/overviews/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":"
  • operator-sdk version v1.32.0
  • kind version v0.23.0
  • git
  • go version 1.22+
  • kubernetes version v1.19+
  • kubectl version v1.19+
  • helm
"},{"location":"kuadrant-operator/doc/overviews/development/#build","title":"Build","text":"
make build\n
"},{"location":"kuadrant-operator/doc/overviews/development/#deploy-on-local-kubernetes-cluster","title":"Deploy on local kubernetes cluster","text":"

Run a local Kubernetes cluster in a Docker container using Kind and deploy the kuadrant operator (and all dependencies) in a single command.

make local-setup\n

The make local-setup target accepts the following variables:

Makefile Variable Description Default value GATEWAYAPI_PROVIDER GatewayAPI provider name. Accepted values: [istio | envoygateway] istio"},{"location":"kuadrant-operator/doc/overviews/development/#run-as-a-local-process","title":"Run as a local process","text":"

Run a local Kubernetes cluster in a Docker container using Kind and deploy all dependencies in a single command.

make local-env-setup\n

The make local-env-setup target accepts the following variables:

Makefile Variable Description Default value GATEWAYAPI_PROVIDER GatewayAPI provider name. Accepted values: [istio | envoygateway] istio

Then, run the operator locally

make run\n
"},{"location":"kuadrant-operator/doc/overviews/development/#deploy-on-existing-kubernetes-cluster","title":"Deploy on existing kubernetes cluster","text":"

Requirements:

  • Active session open to the kubernetes cluster.
  • GatewayAPI installed
  • GatewayAPI provider installed. Currently only Istio and EnvoyGateway supported.
  • Cert Manager installed

Before running the kuadrant operator, some dependencies need to be deployed.

make install\nmake deploy-dependencies\n

Then, deploy the operator

make deploy\n
"},{"location":"kuadrant-operator/doc/overviews/development/#deploy-kuadrant-operator-using-olm","title":"Deploy kuadrant operator using OLM","text":"

You can deploy kuadrant using OLM by running just a few commands. No need to build any image. The Kuadrant engineering team provides latest and release-version tagged images. They are available in the Quay.io/Kuadrant image repository.

Create kind cluster

make kind-create-cluster\n

Deploy OLM system

make install-olm\n

Deploy kuadrant using OLM. The make deploy-catalog target accepts the following variables:

Makefile Variable Description Default value CATALOG_IMG Kuadrant operator catalog image URL quay.io/kuadrant/kuadrant-operator-catalog:latest
make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
"},{"location":"kuadrant-operator/doc/overviews/development/#build-custom-olm-catalog","title":"Build custom OLM catalog","text":"

If you want to deploy (using OLM) a custom kuadrant operator, you need to build your own catalog. Furthermore, if you want to deploy a custom limitador or authorino operator, you also need to build your own catalog. The kuadrant operator bundle includes the authorino or limitador operator dependency version, hence using a version other than latest requires a custom kuadrant operator bundle and a custom catalog including the custom bundle.

"},{"location":"kuadrant-operator/doc/overviews/development/#build-kuadrant-operator-bundle-image","title":"Build kuadrant operator bundle image","text":"

The make bundle target accepts the following variables:

Makefile Variable Description Default value Notes IMG Kuadrant operator image URL quay.io/kuadrant/kuadrant-operator:latest TAG var could be use to build this URL, defaults to latest if not provided VERSION Bundle version 0.0.0 LIMITADOR_OPERATOR_BUNDLE_IMG Limitador operator bundle URL quay.io/kuadrant/limitador-operator-bundle:latest LIMITADOR_OPERATOR_VERSION var could be used to build this, defaults to latest if not provided AUTHORINO_OPERATOR_BUNDLE_IMG Authorino operator bundle URL quay.io/kuadrant/authorino-operator-bundle:latest AUTHORINO_OPERATOR_VERSION var could be used to build this, defaults to latest if not provided DNS_OPERATOR_BUNDLE_IMG DNS operator bundle URL quay.io/kuadrant/dns-operator-bundle:latest DNS_OPERATOR_BUNDLE_IMG var could be used to build this, defaults to latest if not provided RELATED_IMAGE_WASMSHIM WASM shim image URL oci://quay.io/kuadrant/wasm-shim:latest WASM_SHIM_VERSION var could be used to build this, defaults to latest if not provided RELATED_IMAGE_CONSOLEPLUGIN ConsolePlugin image URL quay.io/kuadrant/console-plugin:latest CHANNELS Bundle channels used in the bundle, comma separated alpha DEFAULT_CHANNEL The default channel used in the bundle alpha
  • Build the bundle manifests
make bundle [IMG=quay.io/kuadrant/kuadrant-operator:latest] \\\n            [VERSION=0.0.0] \\\n            [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n            [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \\\n            [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest] \\\n            [RELATED_IMAGE_WASMSHIM=oci://quay.io/kuadrant/wasm-shim:latest] \\\n            [RELATED_IMAGE_CONSOLEPLUGIN=quay.io/kuadrant/console-plugin:latest] \\\n            [CHANNELS=alpha] \\\n            [DEFAULT_CHANNEL=alpha]\n
  • Build the bundle image from the manifests
Makefile Variable Description Default value BUNDLE_IMG Kuadrant operator bundle image URL quay.io/kuadrant/kuadrant-operator-bundle:latest
make bundle-build [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n
  • Push the bundle image to a registry
Makefile Variable Description Default value BUNDLE_IMG Kuadrant operator bundle image URL quay.io/kuadrant/kuadrant-operator-bundle:latest
make bundle-push [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest]\n

Frequently, you may need to build custom kuadrant bundle with the default (latest) Limitador and Authorino bundles. These are the example commands to build the manifests, build the bundle image and push to the registry.

In the example, a new kuadrant operator bundle version 0.8.0 will be created that references the kuadrant operator image quay.io/kuadrant/kuadrant-operator:v0.5.0 and latest Limitador and Authorino bundles.

# manifests\nmake bundle IMG=quay.io/kuadrant/kuadrant-operator:v0.5.0 VERSION=0.8.0\n\n# bundle image\nmake bundle-build BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle\n\n# push bundle image\nmake bundle-push BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:my-bundle\n
"},{"location":"kuadrant-operator/doc/overviews/development/#build-custom-catalog","title":"Build custom catalog","text":"

The catalog's format will be File-based Catalog.

Make sure all the required bundles are pushed to the registry. It is required by the opm tool.

The make catalog target accepts the following variables:

Makefile Variable Description Default value BUNDLE_IMG Kuadrant operator bundle image URL quay.io/kuadrant/kuadrant-operator-bundle:latest LIMITADOR_OPERATOR_BUNDLE_IMG Limitador operator bundle URL quay.io/kuadrant/limitador-operator-bundle:latest AUTHORINO_OPERATOR_BUNDLE_IMG Authorino operator bundle URL quay.io/kuadrant/authorino-operator-bundle:latest DNS_OPERATOR_BUNDLE_IMG DNS operator bundle URL quay.io/kuadrant/dns-operator-bundle:latest DEFAULT_CHANNEL Catalog default channel alpha
make catalog [BUNDLE_IMG=quay.io/kuadrant/kuadrant-operator-bundle:latest] \\\n            [LIMITADOR_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] \\\n            [AUTHORINO_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/authorino-operator-bundle:latest] \\\n            [DNS_OPERATOR_BUNDLE_IMG=quay.io/kuadrant/dns-operator-bundle:latest] \\\n            [DEFAULT_CHANNEL=alpha]\n
  • Build the catalog image from the manifests
Makefile Variable Description Default value CATALOG_IMG Kuadrant operator catalog image URL quay.io/kuadrant/kuadrant-operator-catalog:latest
make catalog-build [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n
  • Push the catalog image to a registry
make catalog-push [CATALOG_IMG=quay.io/kuadrant/kuadrant-operator-catalog:latest]\n

You can try out your custom catalog image following the steps of the Deploy kuadrant operator using OLM section.

"},{"location":"kuadrant-operator/doc/overviews/development/#cleaning-up","title":"Cleaning up","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/overviews/development/#run-tests","title":"Run tests","text":""},{"location":"kuadrant-operator/doc/overviews/development/#unittests","title":"Unittests","text":"
make test-unit\n

Optionally, add TEST_NAME makefile variable to run specific test

make test-unit TEST_NAME=TestLimitIndexEquals\n

or even subtest

make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal\n
"},{"location":"kuadrant-operator/doc/overviews/development/#integration-tests","title":"Integration tests","text":"

Multiple controller integration tests are defined

Golang package Required environment Makefile env setup target Makefile test run target github.com/kuadrant/kuadrant-operator/tests/bare_k8s no gateway provider, no GatewayAPI CRDs. Just Kuadrant API and Kuadrant dependencies. make local-k8s-env-setup make test-bare-k8s-integration github.com/kuadrant/kuadrant-operator/tests/gatewayapi no gateway provider. GatewayAPI CRDs, Kuadrant API and Kuadrant dependencies. make local-gatewayapi-env-setup make test-gatewayapi-env-integration github.com/kuadrant/kuadrant-operator/controllers at least one gatewayapi provider. It can be any: istio, envoygateway, ... make local-env-setup GATEWAYAPI_PROVIDER=[istio \\| envoygateway] [ISTIO_INSTALL_SAIL=false] (Default istio) make test-integration GATEWAYAPI_PROVIDER=[istio \\| envoygateway] (Default istio) github.com/kuadrant/kuadrant-operator/tests/istio GatewayAPI CRDs, Istio, Kuadrant API and Kuadrant dependencies. make local-env-setup GATEWAYAPI_PROVIDER=istio [ISTIO_INSTALL_SAIL=false] make test-istio-env-integration github.com/kuadrant/kuadrant-operator/tests/envoygateway GatewayAPI CRDs, EnvoyGateway, Kuadrant API and Kuadrant dependencies. make local-env-setup GATEWAYAPI_PROVIDER=envoygateway make test-envoygateway-env-integration"},{"location":"kuadrant-operator/doc/overviews/development/#lint-tests","title":"Lint tests","text":"
make run-lint\n
"},{"location":"kuadrant-operator/doc/overviews/development/#uninstall-kuadrant-crds","title":"(Un)Install Kuadrant CRDs","text":"

You need an active session open to a kubernetes cluster.

Remove CRDs

make uninstall\n
"},{"location":"kuadrant-operator/doc/overviews/dns/","title":"Kuadrant DNS","text":"

A Kuadrant DNSPolicy custom resource:

Targets Gateway API networking resources Gateways to provide dns management by managing the lifecycle of dns records in external dns providers such as AWS Route53 and Google DNS.

"},{"location":"kuadrant-operator/doc/overviews/dns/#how-it-works","title":"How it works","text":"

A DNSPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external DNS service. The needed dns names are gathered from the listener definitions and the IPAddresses | CNAME hosts are gathered from the status block of the gateway resource.

"},{"location":"kuadrant-operator/doc/overviews/dns/#the-dnspolicy-custom-resource","title":"The DNSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/dns/#overview","title":"Overview","text":"

The DNSPolicy spec includes the following parts:

  • A reference to an existing Gateway API resource (spec.targetRef)
  • LoadBalancing specification (spec.loadBalancing)
  • HealthCheck specification (spec.healthCheck)

Check out the API reference for a full specification of the DNSPolicy CRD.

"},{"location":"kuadrant-operator/doc/overviews/dns/#using-the-dnspolicy","title":"Using the DNSPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/dns/#dns-provider-setup","title":"DNS Provider Setup","text":"

A DNSPolicy acts against a target Gateway by processing its listeners for hostnames that it can create dns records for. In order for it to do this, it must know about the dns provider. This is done through the creation of dns provider secrets containing the credentials and configuration for the dns provider account.

If for example a Gateway is created with a listener with a hostname of echo.apps.hcpapps.net:

apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: my-gw\nspec:\n  listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All\n      name: api\n      hostname: echo.apps.hcpapps.net\n      port: 80\n      protocol: HTTP\n

In order for the DNSPolicy to act upon that listener, a DNS provider Secret must exist for that hostname's domain.

apiVersion: v1\nkind: Secret\nmetadata:\n  name: my-aws-credentials\n  namespace: <Gateway Namespace>\ndata:\n  AWS_ACCESS_KEY_ID: <AWS_ACCESS_KEY_ID>\n  AWS_REGION: <AWS_REGION>\n  AWS_SECRET_ACCESS_KEY: <AWS_SECRET_ACCESS_KEY>\ntype: kuadrant.io/aws\n

By default, Kuadrant will list the available zones and find the matching zone based on the listener host in the gateway listener. If it finds more than one matching zone for a given listener host, it will not update any of those zones. When providing a credential you should limit that credential down to just have write access to the zones you want Kuadrant to manage. Below is an example of an AWS policy for doing this type of thing:

{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"VisualEditor0\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListTagsForResources\",\n                \"route53:GetHealthCheckLastFailureReason\",\n                \"route53:GetHealthCheckStatus\",\n                \"route53:GetChange\",\n                \"route53:GetHostedZone\",\n                \"route53:ChangeResourceRecordSets\",\n                \"route53:ListResourceRecordSets\",\n                \"route53:GetHealthCheck\",\n                \"route53:UpdateHostedZoneComment\",\n                \"route53:UpdateHealthCheck\",\n                \"route53:CreateHealthCheck\",\n                \"route53:DeleteHealthCheck\",\n                \"route53:ListTagsForResource\",\n                \"route53:ListHealthChecks\",\n                \"route53:GetGeoLocation\",\n                \"route53:ListGeoLocations\",\n                \"route53:ListHostedZonesByName\",\n                \"route53:GetHealthCheckCount\"\n            ],\n            \"Resource\": [\n                \"arn:aws:route53:::hostedzone/Z08187901Y93585DDGM6K\",\n                \"arn:aws:route53:::healthcheck/*\",\n                \"arn:aws:route53:::change/*\"\n            ]\n        },\n        {\n            \"Sid\": \"VisualEditor1\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListHostedZones\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n
"},{"location":"kuadrant-operator/doc/overviews/dns/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

When a DNSPolicy targets a Gateway, the policy will be enforced on all gateway listeners.

Target a Gateway by setting the spec.targetRef field of the DNSPolicy as follows:

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: <DNSPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n
"},{"location":"kuadrant-operator/doc/overviews/dns/#targeting-a-specific-listener-of-a-gateway","title":"Targeting a specific Listener of a gateway","text":"

A DNSPolicy can target a specific listener in a gateway using the sectionName property of the targetRef configuration. When you set the sectionName, the DNSPolicy will only affect that listener and no others. If you also have another DNSPolicy targeting the entire gateway, the more specific policy targeting the listener will be the policy that is applied.

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: <DNSPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n    sectionName: <myListenerName>\n
"},{"location":"kuadrant-operator/doc/overviews/dns/#dnsrecord-resource","title":"DNSRecord Resource","text":"

The DNSPolicy will create a DNSRecord resource for each listener hostname. The DNSPolicy resource uses the status of the Gateway to determine what dns records need to be created based on the clusters it has been placed onto.

Given the following multi cluster gateway status:

status:\n  addresses:\n\n    - type: kuadrant.io/MultiClusterIPAddress\n      value: kind-mgc-workload-1/172.31.201.1\n    - type: kuadrant.io/MultiClusterIPAddress\n      value: kind-mgc-workload-2/172.31.202.1\n  listeners:\n    - attachedRoutes: 1\n      conditions: []\n      name: kind-mgc-workload-1.api\n      supportedKinds: []\n    - attachedRoutes: 1\n      conditions: []\n      name: kind-mgc-workload-2.api\n      supportedKinds: []\n

A DNSPolicy targeting this gateway would create an appropriate DNSRecord based on the routing strategy selected.

"},{"location":"kuadrant-operator/doc/overviews/dns/#loadbalanced","title":"loadbalanced","text":"
apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n  name: echo.apps.hcpapps.net\n  namespace: <Gateway Namespace>\nspec:\n  endpoints:\n\n    - dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n      recordTTL: 60\n      recordType: A\n      targets:\n        - 172.31.202.1\n    - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n      providerSpecific:\n        - name: weight\n          value: \"120\"\n      recordTTL: 60\n      recordType: CNAME\n      setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n      targets:\n        - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n      providerSpecific:\n        - name: weight\n          value: \"120\"\n      recordTTL: 60\n      recordType: CNAME\n      setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n      targets:\n        - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: echo.apps.hcpapps.net\n      recordTTL: 300\n      recordType: CNAME\n      targets:\n        - lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: lb-2903yb.echo.apps.hcpapps.net\n      providerSpecific:\n        - name: geo-country-code\n          value: '*'\n      recordTTL: 300\n      recordType: CNAME\n      setIdentifier: default\n      targets:\n        - default.lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n      recordTTL: 60\n      recordType: A\n      targets:\n        - 172.31.201.1\n  providerRefs:\n    - name: my-aws-credentials\n

After DNSRecord reconciliation the listener hostname should be resolvable through dns:

dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\ndefault.lb-2903yb.echo.apps.hcpapps.net.\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
"},{"location":"kuadrant-operator/doc/overviews/dns/#simple","title":"simple","text":"
apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n  name: echo.apps.hcpapps.net\n  namespace: <Gateway Namespace>\nspec:\n  endpoints:\n\n    - dnsName: echo.apps.hcpapps.net\n      recordTTL: 60\n      recordType: A\n      targets:\n        - 172.31.201.1\n        - 172.31.202.1\n  providerRefs:\n    - name: my-aws-credentials\n

After DNSRecord reconciliation the listener hostname should be resolvable through dns:

dig echo.apps.hcpapps.net +short\n172.31.201.1\n
"},{"location":"kuadrant-operator/doc/overviews/dns/#known-limitations","title":"Known limitations","text":"
  • One Gateway can only be targeted by one DNSPolicy unless subsequent DNSPolicies choose to specify a sectionName in their targetRef.
  • DNSPolicies can only target Gateways defined within the same namespace of the DNSPolicy.
"},{"location":"kuadrant-operator/doc/overviews/dns/#troubleshooting","title":"Troubleshooting","text":""},{"location":"kuadrant-operator/doc/overviews/dns/#understanding-status","title":"Understanding status","text":"

The Status.Conditions on DNSPolicy mostly serves as an aggregation of the DNSRecords conditions. The DNSPolicy conditions:

  • Accepted indicates that policy was validated and is accepted by the controller for the reconciliation.
  • Enforced indicates that the controller acted upon the policy. If DNSRecords were created as the result this condition will reflect the Ready condition on the record. This condition is removed if Accepted is false. If partially enforced, the condition will be set to True
  • SubResourcesHealthy reflects Healthy conditions of sub-resources. This condition is removed if Accepted is false. If partially healthy, the condition will be set to False

The Status.Conditions on the DNSRecord are as follows:

  • Ready indicates that the record was successfully published to the provider.
  • Healthy indicates that dnshealthcheckprobes are healthy. If not all probes are healthy, the condition will be set to False
"},{"location":"kuadrant-operator/doc/overviews/dns/#logs","title":"Logs","text":"

To increase the log level of the kuadrant-operator refer to this logging doc.

To increase the log level of the dns-operator-controller-manager and for the examples on log queries refer to the logging section in the DNS Operator readme

"},{"location":"kuadrant-operator/doc/overviews/dns/#debugging","title":"Debugging","text":"

This section will provide the typical sequence of actions during the troubleshooting. It is meant to be a reference for identifying the problem rather than an SOP.

"},{"location":"kuadrant-operator/doc/overviews/dns/#list-policies-to-identify-the-failing-one","title":"List policies to identify the failing one","text":"
kubectl get dnspolicy -A -o wide\n
"},{"location":"kuadrant-operator/doc/overviews/dns/#inspect-the-failing-policy","title":"Inspect the failing policy","text":"

kubectl get dnspolicy <dnspolicy-name> -n <dnspolicy-namespace> -o yaml | yq '.status.conditions'\n
The output will show which DNSRecords and for what reasons are failing. For example:
- lastTransitionTime: \"2024-12-04T09:46:22Z\"\n  message: DNSPolicy has been accepted\n  reason: Accepted\n  status: \"True\"\n  type: Accepted\n- lastTransitionTime: \"2024-12-04T09:46:29Z\"\n  message: 'DNSPolicy has been partially enforced. Not ready DNSRecords are: test-api '\n  reason: Enforced\n  status: \"True\"\n  type: Enforced\n- lastTransitionTime: \"2024-12-04T09:46:27Z\"\n  message: 'DNSPolicy has encountered some issues: not all sub-resources of policy are passing the policy defined health check. Not healthy DNSRecords are: test-api '\n  reason: Unknown\n  status: \"False\"\n  type: SubResourcesHealthy\n
This example indicates that the policy was accepted and one of the DNSRecords - test-api DNSRecord - is not ready and not healthy

"},{"location":"kuadrant-operator/doc/overviews/dns/#locate-sub-records-to-confirm-conditions","title":"Locate sub-records to confirm conditions","text":"

This ensures that the Kuadrant operator propagated status correctly. The names of the DNSRecords are composed of the Gateway name followed by a listener name and are created in the DNSPolicy namespace.

kubectl get dnsrecord -n <dnspolicy-namespace> \n

"},{"location":"kuadrant-operator/doc/overviews/dns/#inspect-the-record-to-get-more-detailed-information-on-the-failure","title":"Inspect the record to get more detailed information on the failure","text":"

kubectl get dnsrecord <dnsrecord-name> -n <dnspolicy-namespace> -o yaml | yq '.status'\n
Most of the time the conditions will hold all necessary information. However, it is advised to pay attention to the queuedAt and validFor fields to understand when the record was processed and when the controller expects it to be reconciled again.

"},{"location":"kuadrant-operator/doc/overviews/dns/#inspect-health-check-probes","title":"Inspect health check probes","text":"

We create a probe per address per dns record. The name of the probe is DNSRecord name followed by an address.

# list probes \nkubectl get dnshealthcheckprobe -n <dnspolicy-namespace>\n# inspect the probe \nkubectl get dnshealthcheckprobe <probe-name> -n <dnspolicy-namespace> -o yaml | yq '.status'\n

"},{"location":"kuadrant-operator/doc/overviews/dns/#identify-what-in-logs-to-look-for","title":"Identify what in logs to look for","text":"

There are two operators to look into and a number of controllers. The commands above should provide an understanding of what component/process is failing. Use the following to identify the correct controller:

  • If the problem is in the status propagation from the DNSRecord to the DNSPolicy or in the creation of the DNSRecord: kuadrant-operator logs under kuadrant-operator.EffectiveDNSPoliciesReconciler reconciler
  • If the problem is in publishing DNSRecord or reacting to the healthcheckprobe CR: dns-operator-controller-manager logs under dnsrecord_controller reconciler
  • If the problem is in the creation of the probes: dns-operator-controller-manager logs under dnsrecord_controller.healthchecks reconciler
  • If the problem is in the execution of the healthchecks: dns-operator-controller-manager logs under dnsprobe_controller reconciler
"},{"location":"kuadrant-operator/doc/overviews/logging/","title":"Logging","text":"

The kuadrant operator outputs 3 levels of log messages: (from lowest to highest level)

  1. debug
  2. info (default)
  3. error

info logging is restricted to high-level information. Actions like creating, deleting or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.

Only debug logging will include processing details.

To configure the desired log level, set the environment variable LOG_LEVEL to one of the supported values listed above. Default log level is info.

Apart from log level, the operator can output messages to the logs in 2 different formats:

  • production (default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
  • development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}

To configure the desired log mode, set the environment variable LOG_MODE to one of the supported values listed above. Default log mode is production.

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/","title":"Kuadrant Rate Limiting","text":"

A Kuadrant RateLimitPolicy custom resource, often abbreviated \"RateLimitPolicy\":

  1. Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to rate limit.
  2. Supports targeting subsets (sections) of a network resource to apply the limits to.
  3. Abstracts the details of the underlying Rate Limit protocol and configuration resources, that have a much broader remit and surface area.
  4. Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"

Kuadrant's Rate Limit implementation relies on the Envoy's Rate Limit Service (RLS) protocol. The workflow per request goes:

  1. On incoming request, the gateway checks the matching rules for enforcing rate limits, as stated in the RateLimitPolicy custom resources and targeted Gateway API networking objects
  2. If the request matches, the gateway sends one RateLimitRequest to the external rate limiting service (\"Limitador\").
  3. The external rate limiting service responds with a RateLimitResponse back to the gateway with either an OK or OVER_LIMIT response code.

A RateLimitPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external rate limiting service.

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#the-ratelimitpolicy-custom-resource","title":"The RateLimitPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#overview","title":"Overview","text":"

The RateLimitPolicy spec includes, basically, two parts:

  • A reference to an existing Gateway API resource (spec.targetRef)
  • Limit definitions (spec.limits)

Each limit definition includes:

  • A set of rate limits (spec.limits.<limit-name>.rates[])
  • (Optional) A set of dynamic counter qualifiers (spec.limits.<limit-name>.counters[])
  • (Optional) A set of additional dynamic conditions to activate the limit (spec.limits.<limit-name>.when[])

The limit definitions (limits) can be declared at the top-level level of the spec (with the semantics of defaults) or alternatively within explicit defaults or overrides blocks.

Check out Kuadrant RFC 0002 to learn more about the Well-known Attributes that can be used to define counter qualifiers (counters) and conditions (when).

Check out the API reference for a full specification of the RateLimitPolicy CRD.

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#using-the-ratelimitpolicy","title":"Using the RateLimitPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"

When a RateLimitPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs field of the HTTPRoute.

Target a HTTPRoute by setting the spec.targetRef field of the RateLimitPolicy as follows:

apiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: <RateLimitPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: <HTTPRoute Name>\n  limits: { \u2026 }\n

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"

If a RateLimitPolicy targets a route defined for *.com and another RateLimitPolicy targets another route for api.com, the Kuadrant control plane will not merge these two RateLimitPolicies. Unless one of the policies declares an overrides set of limits, the control plane will configure to mimic the behavior of gateway implementation by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and limit definitions.

E.g., by default, a request coming for api.com will be rate limited according to the rules from the RateLimitPolicy that targets the route for api.com; while a request for other.com will be rate limited with the rules from the RateLimitPolicy targeting the route for *.com.

See more examples in Overlapping Gateway and HTTPRoute RateLimitPolicies.

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

A RateLimitPolicy that targets a Gateway can declare a block of defaults (spec.defaults) or a block of overrides (spec.overrides). As a standard, gateway policies that specify neither defaults nor overrides act as defaults.

When declaring defaults, a RateLimitPolicy which targets a Gateway will be enforced to all HTTP traffic hitting the gateway, unless a more specific RateLimitPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referencing the gateway as parent will be automatically covered by the default RateLimitPolicy, as well as changes in the existing HTTPRoutes.

Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempt, such as by setting safe default limits on hostnames and hostname wildcards.

Inversely, a gateway policy that specifies overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.

Target a Gateway by setting the spec.targetRef field of the RateLimitPolicy as follows:

apiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: <RateLimitPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n  defaults: # alternatively: `overrides`\n    limits: { \u2026 }\n

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#overlapping-gateway-and-httproute-ratelimitpolicies","title":"Overlapping Gateway and HTTPRoute RateLimitPolicies","text":"

Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.

Gateway RateLimitPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute RateLimitPolicy exists, in which case the HTTPRoute RateLimitPolicy prevails.

Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoute and 2 Gateways without RateLimitPolicies attached):

  • RateLimitPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
  • RateLimitPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
  • RateLimitPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
  • RateLimitPolicy G (defaults) \u2192 Gateway G (*.com)

Expected behavior:

  • Request to a.toystore.com \u2192 RateLimitPolicy A will be enforced
  • Request to b.toystore.com \u2192 RateLimitPolicy B will be enforced
  • Request to other.toystore.com \u2192 RateLimitPolicy W will be enforced
  • Request to other.com (suppose a route exists) \u2192 RateLimitPolicy G will be enforced
  • Request to yet-another.net (suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced

Gateway RateLimitPolicies that declare overrides protect all traffic routed through the gateway, regardless of existence of any more specific HTTPRoute RateLimitPolicy.

Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoute and 2 Gateways without RateLimitPolicies attached):

  • RateLimitPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
  • RateLimitPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
  • RateLimitPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
  • RateLimitPolicy G (overrides) \u2192 Gateway G (*.com)

Expected behavior:

  • Request to a.toystore.com \u2192 RateLimitPolicy G will be enforced
  • Request to b.toystore.com \u2192 RateLimitPolicy G will be enforced
  • Request to other.toystore.com \u2192 RateLimitPolicy G will be enforced
  • Request to other.com (suppose a route exists) \u2192 RateLimitPolicy G will be enforced
  • Request to yet-another.net (suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced
"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#limit-definition","title":"Limit definition","text":"

A limit will be activated whenever a request comes in and the request matches:

  • all of the when conditions specified in the limit.

A limit can define:

  • counters that are qualified based on dynamic values fetched from the request, or
  • global counters (implicitly, when no qualified counter is specified)

A limit is composed of one or more rate limits.

E.g.

spec:\n  limits:\n    \"toystore-all\":\n      rates:\n\n        - limit: 5000\n          window: 1s\n\n    \"toystore-api-per-username\":\n      rates:\n\n        - limit: 100\n          window: 1s\n        - limit: 1000\n          window: 1m\n      counters:\n        - expression: auth.identity.username\n      when:\n        - predicate: request.host == 'api.toystore.com'\n\n    \"toystore-admin-unverified-users\":\n      rates:\n\n        - limit: 250\n          window: 1s\n      when:\n        - predicate: request.host == 'admin.toystore.com'\n        - predicate: !auth.identity.email_verified\n
Request to Rate limits enforced api.toystore.com 100rps/username or 1000rpm/username (whatever happens first) admin.toystore.com 250rps other.toystore.com 5000rps"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#when-conditions","title":"when conditions","text":"

when conditions can be used to scope a limit (i.e. to filter the traffic to which a limit definition applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules.

Use when conditions to conditionally activate limits based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames and spec.rules.matches fields, or in general in RateLimitPolicies that target a Gateway.

The selectors within the when conditions of a RateLimitPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.

"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#examples","title":"Examples","text":"

Check out the following user guides for examples of rate limiting services with Kuadrant:

  • Simple Rate Limiting for Applications
  • Authenticated Rate Limiting for Application
  • Gateway Rate Limiting for Cluster Operators
  • Authenticated Rate Limiting with JWTs and Kubernetes RBAC
"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#known-limitations","title":"Known limitations","text":"
  • RateLimitPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the RateLimitPolicy.
  • 2+ RateLimitPolicies cannot target network resources that define/inherit the same exact hostname.
"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#implementation-details","title":"Implementation details","text":"

Driven by limitations related to how Istio injects configuration in the filter chains of the ingress gateways, Kuadrant relies on Envoy's Wasm Network filter in the data plane, to manage the integration with rate limiting service (\"Limitador\"), instead of the Rate Limit filter.

Motivation: Multiple rate limit domains

The first limitation comes from having only one filter chain per listener. This often leads to one single global rate limiting filter configuration per gateway, and therefore to a shared rate limit domain across applications and policies. Even though, in a rate limit filter, the triggering of rate limit calls, via actions to build so-called \"descriptors\", can be defined at the level of the virtual host and/or specific route rule, the overall rate limit configuration is only one, i.e., always the same rate limit domain for all calls to Limitador.

On the other hand, the possibility to configure and invoke the rate limit service for multiple domains depending on the context allows to isolate groups of policy rules, as well as to optimize performance in the rate limit service, which can rely on the domain for indexation.

Motivation: Fine-grained matching rules A second limitation of configuring the rate limit filter via Istio, particularly from Gateway API resources, is that rate limit descriptors at the level of a specific HTTP route rule require \"named routes\" \u2013 defined only in an Istio VirtualService resource and referred in an EnvoyFilter one. Because Gateway API HTTPRoute rules lack a \"name\" property1, as well as the Istio VirtualService resources are only ephemeral data structures handled by Istio in-memory in its implementation of gateway configuration for Gateway API, where the names of individual route rules are auto-generated and not referable by users in a policy23, rate limiting by attributes of the HTTP request (e.g., path, method, headers, etc) would be very limited while depending only on Envoy's Rate Limit filter.

Motivated by the desire to support multiple rate limit domains per ingress gateway, as well as fine-grained HTTP route matching rules for rate limiting, Kuadrant implements a wasm-shim that handles the rules to invoke the rate limiting service, complying with Envoy's Rate Limit Service (RLS) protocol.

The wasm module integrates with the gateway in the data plane via Wasm Network filter, and parses a configuration composed out of user-defined RateLimitPolicy resources by the Kuadrant control plane. Whereas the rate limiting service (\"Limitador\") remains an implementation of Envoy's RLS protocol, capable of being integrated directly via Rate Limit extension or by Kuadrant, via wasm module for the Istio Gateway API implementation.

As a consequence of this design:

  • Users can define fine-grained rate limit rules that match their Gateway and HTTPRoute definitions including for subsections of these.
  • Rate limit definitions are insulated, not leaking across unrelated policies or applications.
  • Conditions to activate limits are evaluated in the context of the gateway process, reducing the gRPC calls to the external rate limiting service only to the cases where rate limit counters are known in advance to have to be checked/incremented.
  • The rate limiting service can rely on the indexation to look up for groups of limit definitions and counters.
  • Components remain compliant with industry protocols and flexible for different integration options.

A Kuadrant wasm-shim configuration for one RateLimitPolicy custom resource targeting an HTTPRoute looks like the following and is generated automatically by the Kuadrant control plane:

apiVersion: extensions.istio.io/v1alpha1\nkind: WasmPlugin\nmetadata:\n  creationTimestamp: \"2024-10-01T16:59:40Z\"\n  generation: 1\n  name: kuadrant-kuadrant-ingressgateway\n  namespace: gateway-system\n  ownerReferences:\n\n    - apiVersion: gateway.networking.k8s.io/v1\n      blockOwnerDeletion: true\n      controller: true\n      kind: Gateway\n      name: kuadrant-ingressgateway\n      uid: 0298355b-fb30-4442-af2b-88d0c05bd2bd\n  resourceVersion: \"11253\"\n  uid: 36ef1fb7-9eca-46c7-af63-fe783f40148c\nspec:\n  phase: STATS\n  pluginConfig:\n    services:\n      ratelimit-service:\n        type: ratelimit\n        endpoint: ratelimit-cluster\n        failureMode: allow\n    actionSets:\n      - name: some_name_0\n        routeRuleConditions:\n          hostnames:\n            - \"*.toystore.website\"\n            - \"*.toystore.io\"\n          predicates:\n            - request.url_path.startsWith(\"/assets\")\n        actions:\n          - service: ratelimit-service\n            scope: gateway-system/app-rlp\n            predicates:\n              - request.host.endsWith('.toystore.website')\n            data:\n              - expression:\n                  key: limit.toystore_assets_all_domains__b61ee8e6\n                  value: \"1\"\n      - name: some_name_1\n        routeRuleConditions:\n          hostnames:\n            - \"*.toystore.website\"\n            - \"*.toystore.io\"\n          predicates:\n            - request.url_path.startsWith(\"/v1\")\n        actions:\n          - service: ratelimit-service\n            scope: gateway-system/app-rlp\n            predicates:\n              - request.host.endsWith('.toystore.website')\n              - auth.identity.username == \"\"\n            data:\n              - expression:\n                  key: limit.toystore_v1_website_unauthenticated__377837ee\n                  value: \"1\"\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: kuadrant-ingressgateway\n  url: 
oci://quay.io/kuadrant/wasm-shim:latest\n
  1. https://github.com/kubernetes-sigs/gateway-api/pull/996\u00a0\u21a9

  2. https://github.com/istio/istio/issues/36790\u00a0\u21a9

  3. https://github.com/istio/istio/issues/37346\u00a0\u21a9

"},{"location":"kuadrant-operator/doc/overviews/tls/","title":"TLS","text":"

A Kuadrant TLSPolicy custom resource:

Targets Gateway API Gateway resources to provide TLS for gateway listeners by managing the lifecycle of TLS certificates using CertManager.

"},{"location":"kuadrant-operator/doc/overviews/tls/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/overviews/tls/#the-tlspolicy-custom-resource","title":"The TLSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/tls/#overview","title":"Overview","text":"

The TLSPolicy spec includes the following parts:

  • A reference to an existing Gateway API resource (spec.targetRef)
"},{"location":"kuadrant-operator/doc/overviews/tls/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"
apiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: my-tls-policy\nspec:\n  # reference to an existing networking resource to attach the policy to\n  # it can only be a Gateway API Gateway resource\n  # it can only refer to objects in the same namespace as the TLSPolicy\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: mygateway\n

Check out the API reference for a full specification of the TLSPolicy CRD.

"},{"location":"kuadrant-operator/doc/overviews/tls/#using-the-tlspolicy","title":"Using the TLSPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/tls/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

When a TLSPolicy targets a Gateway, the policy will be enforced on all gateway listeners that have a valid TLS section.

Target a Gateway by setting the spec.targetRef field of the TLSPolicy as follows:

apiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: <TLSPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n
"},{"location":"kuadrant-operator/doc/overviews/tls/#examples","title":"Examples","text":"

Check out the following user guides for examples of using the Kuadrant TLSPolicy:

"},{"location":"kuadrant-operator/doc/overviews/tls/#known-limitations","title":"Known limitations","text":""},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/","title":"RLP can target a Gateway resource","text":"

Previous version: https://hackmd.io/IKEYD6NrSzuGQG1nVhwbcw

Based on: https://hackmd.io/_1k6eLCNR2eb9RoSzOZetg

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#introduction","title":"Introduction","text":"

The current RateLimitPolicy CRD already implements a targetRef with a reference to Gateway API's HTTPRoute. This doc captures the design and some implementation details of allowing the targetRef to reference a Gateway API's Gateway.

Having in place this HTTPRoute - Gateway hierarchy, we are also considering to apply Policy Attachment's defaults/overrides approach to the RateLimitPolicy CRD. But for now, it will only be about targeting the Gateway resource.

On designing Kuadrant's rate limiting and considering Istio/Envoy's rate limiting offering, we hit two limitations (described here). Therefore, not giving up entirely in existing Envoy's RateLimit Filter, we decided to move on and leverage the Envoy's Wasm Network Filter and implement rate limiting wasm-shim module compliant with the Envoy's Rate Limit Service (RLS). This wasm-shim module accepts a PluginConfig struct object as input configuration object.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#use-cases-targeting-a-gateway","title":"Use Cases targeting a gateway","text":"

A key use case is being able to provide governance over what service providers can and cannot do when exposing a service via a shared ingress gateway. As well as providing certainty that no service is exposed without my ability as a cluster administrator to protect my infrastructure from unplanned load from badly behaving clients etc.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#goals","title":"Goals","text":"

The goal of this document is to define:

  • The schema of this PluginConfig struct.
  • The kuadrant-operator behavior filling the PluginConfig struct having as input the RateLimitPolicy k8s objects
  • The behavior of the wasm-shim having the PluginConfig struct as input.
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"

Kuadrant's rate limit relies on the Rate Limit Service (RLS) protocol, hence the gateway generates, based on a set of actions, a set of descriptors (one descriptor is a set of descriptor entries). Those descriptors are sent to the external rate limit service provider. When multiple descriptors are provided, the external service provider will limit on ALL of them and return an OVER_LIMIT response if any of them are over limit.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#schema-crd-of-the-ratelimitpolicy","title":"Schema (CRD) of the RateLimitPolicy","text":"
---\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: my-rate-limit-policy\nspec:\n  # Reference to an existing networking resource to attach the policy to. REQUIRED.\n  # It can be a Gateway API HTTPRoute or Gateway resource.\n  # It can only refer to objects in the same namespace as the RateLimitPolicy.\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute / Gateway\n    name: myroute / mygateway\n\n  # The limits definitions to apply to the network traffic routed through the targeted resource.\n  # Equivalent to if otherwise declared within `defaults`.\n  limits:\n    \"my_limit\":\n      # The rate limits associated with this limit definition. REQUIRED.\n      # E.g., to specify a 50rps rate limit, add `{ limit: 50, duration: 1, unit: second }`\n      rates: [\u2026]\n\n      # Counter qualifiers.\n      # Each dynamic value in the data plane starts a separate counter, combined with each rate limit.\n      # E.g., to define a separate rate limit for each user name detected by the auth layer, add `metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.username`.\n      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n      counters: [\u2026]\n\n      # Additional dynamic conditions to trigger the limit.\n      # Use it for filtering attributes not supported by HTTPRouteRule or with RateLimitPolicies that target a Gateway.\n      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n      when: [\u2026]\n\n    # Explicit defaults. 
Used in policies that target a Gateway object to express default rules to be enforced on\n    # routes that lack a more specific policy attached to.\n    # Mutually exclusive with `overrides` and with declaring `limits` at the top-level of the spec.\n    defaults:\n      limits: {\u2026}\n\n    # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n    # thus also overriding any more specific policy occasionally attached to any of those routes.\n    # Mutually exclusive with `defaults` and with declaring `limits` at the top-level of the spec.\n    overrides:\n      limits: {\u2026}\n

.spec.rateLimits holds a list of rate limit configurations represented by the object RateLimit. Each RateLimit object represents a complete rate limit configuration. It contains three fields:

  • rules (optional): Rules allow matching hosts and/or methods and/or paths. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.

  • configurations (required): Specifies a set of rate limit configurations that could be applied. The rate limit configuration object is the equivalent of the config.route.v3.RateLimit envoy object. One configuration is, in turn, a list of rate limit actions. Each action populates a descriptor entry. A vector of descriptor entries compose a descriptor. Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor. These rate limiting configuration rules provide flexibility to produce multiple descriptors. For example, you may want to define one generic rate limit descriptor and another descriptor depending on some header. If the header does not exist, the second descriptor is not generated, but traffic keeps being rate limited based on the generic descriptor.

configurations:\n\n  - actions:\n    - request_headers:\n        header_name: \"X-MY-CUSTOM-HEADER\"\n        descriptor_key: \"custom-header\"\n        skip_if_absent: true\n  - actions:\n    - generic_key:\n        descriptor_key: admin\n        descriptor_value: \"1\"\n
  • limits (optional): configuration of the rate limiting service (Limitador). Check out limitador documentation for more information about the fields of each Limit object.

Note: No namespace/domain defined. Kuadrant operator will figure out.

Note: There is no PREAUTH, POSTAUTH stage defined. Ratelimiting filter should be placed after authorization filter to enable authenticated rate limiting. In the future, stage can be implemented.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#kuadrant-operators-behavior","title":"Kuadrant-operator's behavior","text":"

One HTTPRoute can only be targeted by one rate limit policy.

Similarly, one Gateway can only be targeted by one rate limit policy.

However, indirectly, one gateway will be affected by multiple rate limit policies. By design of the Gateway API, one gateway can be referenced by multiple HTTPRoute objects. Furthermore, one HTTPRoute can reference multiple gateways.

The kuadrant operator will aggregate all the rate limit policies that apply for each gateway, including RLP targeting HTTPRoutes and Gateways.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#virtualhosting-ratelimitpolicies","title":"\"VirtualHosting\" RateLimitPolicies","text":"

Rate limit policies are scoped by the domains defined at the referenced HTTPRoute's hostnames and Gateway's Listener's Hostname.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#multiple-httproutes-with-the-same-hostname","title":"Multiple HTTPRoutes with the same hostname","text":"

When there are multiple HTTPRoutes with the same hostname, HTTPRoutes are all admitted and Envoy merges the routing configuration into the same virtualhost. In these cases, the control plane has to \"merge\" the rate limit configuration into a single entry for the wasm filter.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#overlapping-httproutes","title":"Overlapping HTTPRoutes","text":"

If some RLP targets a route for *.com and other RLP targets another route for api.com, the control plane does not do any merging. A request coming for api.com will be rate limited with the rules from the RLP targeting the route api.com. Also, a request coming for other.com will be rate limited with the rules from the RLP targeting the route *.com.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#examples","title":"examples","text":"

RLP A -> HTTPRoute A (api.toystore.com) -> Gateway G (*.com)

RLP B -> HTTPRoute B (other.toystore.com) -> Gateway G (*.com)

RLP H -> HTTPRoute H (*.toystore.com) -> Gateway G (*.com)

RLP G -> Gateway G (*.com)

Request 1 (api.toystore.com) -> apply RLP A and RLP G

Request 2 (other.toystore.com) -> apply RLP B and RLP G

Request 3 (unknown.toystore.com) -> apply RLP H and RLP G

Request 4 (other.com) -> apply RLP G

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#rate-limit-domain-limitador-namespace","title":"rate limit domain / limitador namespace","text":"

The kuadrant operator will add the domain attribute of Envoy's Rate Limit Service (RLS). It will also add the namespace attribute of Limitador's rate limit config. The operator will ensure that the associated actions and rate limits have a common domain/namespace.

The value of this domain/namespace seems to be related to the virtualhost for which rate limit applies.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#schema-of-the-wasm-filter-configuration-object-the-pluginconfig","title":"Schema of the WASM filter configuration object: the PluginConfig","text":"

Currently the PluginConfig looks like this:

#  The filter\u2019s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.\nfailure_mode_deny: true\nratelimitpolicies:\n  default/toystore: # rate limit policy {NAMESPACE/NAME}\n    hosts: # HTTPRoute hostnames\n\n      - '*.toystore.com'\n    rules: # route level actions\n      - operations:\n          - paths:\n              - /admin/toy\n            methods:\n              - POST\n              - DELETE\n        actions:\n          - generic_key:\n              descriptor_value: yes\n              descriptor_key: admin\n    global_actions: # virtualHost level actions\n      - generic_key:\n          descriptor_value: yes\n          descriptor_key: vhaction\n    upstream_cluster: rate-limit-cluster # Limitador address reference\n    domain: toystore-app # RLS protocol domain value\n

Proposed new design for the WASM filter configuration object (PluginConfig struct):

#  The filter\u2019s behaviour in case the rate limiting service does not respond back. When it is set to true, Envoy will not allow traffic in case of communication failure between rate limiting service and the proxy.\nfailure_mode_deny: true\nrate_limit_policies:\n\n  - name: toystore\n    rate_limit_domain: toystore-app\n    upstream_cluster: rate-limit-cluster\n    hostnames: [\"*.toystore.com\"]\n    gateway_actions:\n      - rules:\n          - paths: [\"/admin/toy\"]\n            methods: [\"GET\"]\n            hosts: [\"pets.toystore.com\"]\n        configurations:\n          - actions:\n            - generic_key:\n                descriptor_key: admin\n                descriptor_value: \"1\"\n

Update highlights:

  • [minor] rate_limit_policies is a list instead of a map indexed by the name/namespace.
  • [major] no distinction between \"rules\" and global actions
  • [major] more aligned with RLS: multiple descriptors structured by \"rate limit configurations\" with matching rules
"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#wasm-shim","title":"WASM-SHIM","text":"

WASM filter rate limit policies are not exactly the same as user managed RateLimitPolicy custom resources. The WASM filter rate limit policies are part of the internal configuration and therefore not exposed to the end user.

At the WASM filter level, there are no route level or gateway level rate limit policies. The rate limit policies in the wasm plugin configuration may not map 1:1 to user managed RateLimitPolicy custom resources. WASM rate limit policies have an internal logical name and a set of hostnames to activate it based on the incoming request\u2019s host header.

The WASM filter builds a tree based data structure holding the rate limit policies. The longest (sub)domain match is used to select the policy to be applied. Only one policy is being applied per invocation.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#rate-limit-configurations","title":"rate limit configurations","text":"

The WASM filter configuration object contains a list of rate limit configurations to build a list of Envoy's RLS descriptors. These configurations are defined at

rate_limit_policies[*].gateway_actions[*].configurations\n

For example:

configurations:\n\n- actions:\n   - generic_key:\n        descriptor_key: admin\n        descriptor_value: \"1\"\n

How to read the policy:

  • Each configuration produces, at most, one descriptor. Depending on the incoming request, one configuration may or may not produce a rate limit descriptor.

  • Each policy configuration has associated, optionally, a set of rules to match. Rules allow matching hosts and/or methods and/or paths. Matching occurs when at least one rule applies against the incoming request. If rules are not set, it is equivalent to matching all the requests.

  • Each configuration object defines a list of actions. Each action may (or may not) produce a descriptor entry (descriptor list item). If an action cannot append a descriptor entry, no descriptor is generated for the configuration.

Note: The external rate limit service will be called when the gateway_actions object produces at least one non-empty descriptor.

"},{"location":"kuadrant-operator/doc/proposals/rlp-target-gateway-resource/#example","title":"example","text":"

WASM filter rate limit policy for *.toystore.com. I want some rate limit descriptors configuration only for api.toystore.com and another set of descriptors for admin.toystore.com. The wasm filter config would look like this:

failure_mode_deny: true\nrate_limit_policies:\n\n  - name: toystore\n    rate_limit_domain: toystore-app\n    upstream_cluster: rate-limit-cluster\n    hostnames: [\"*.toystore.com\"]\n    gateway_actions:\n      - configurations:  # no rules. Applies to all *.toystore.com traffic\n          - actions:\n              - generic_key:\n                  descriptor_key: toystore-app\n                  descriptor_value: \"1\"\n      - rules:\n          - hosts: [\"api.toystore.com\"]\n        configurations:\n          - actions:\n              - generic_key:\n                  descriptor_key: api\n                  descriptor_value: \"1\"\n      - rules:\n          - hosts: [\"admin.toystore.com\"]\n        configurations:\n          - actions:\n              - generic_key:\n                  descriptor_key: admin\n                  descriptor_value: \"1\"\n
  • When a request for api.toystore.com hits the filter, the descriptors generated would be:

descriptor 1

(\"toystore-app\", \"1\")\n
descriptor 2
(\"api\", \"1\")\n

  • When a request for admin.toystore.com hits the filter, the descriptors generated would be:

descriptor 1

(\"toystore-app\", \"1\")\n
descriptor 2
(\"admin\", \"1\")\n

  • When a request for other.toystore.com hits the filter, the descriptors generated would be: descriptor 1
    (\"toystore-app\", \"1\")\n
"},{"location":"kuadrant-operator/doc/reference/authpolicy/","title":"The AuthPolicy Custom Resource Definition (CRD)","text":"
  • AuthPolicy
  • AuthPolicySpec
  • AuthScheme
    • AuthRuleCommon
    • AuthenticationRule
    • MetadataRule
    • AuthorizationRule
    • ResponseSpec
    • SuccessResponseSpec
      • SuccessResponseItem
    • CallbackRule
  • NamedPattern
  • AuthPolicyCommonSpec
  • AuthPolicyStatus
  • ConditionSpec
"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicy","title":"AuthPolicy","text":"Field Type Required Description spec AuthPolicySpec Yes The specification for AuthPolicy custom resource status AuthPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicyspec","title":"AuthPolicySpec","text":"Field Type Required Description targetRef LocalPolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to rules AuthScheme No Implicit default authentication/authorization rules patterns MapNamedPattern> No Implicit default named patterns of lists of selector, operator and value tuples, to be reused in when conditions and pattern-matching authorization rules. when []PatternExpressionOrRef No List of implicit default additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames and spec.rules.matches fields, or when targeting a Gateway. defaults AuthPolicyCommonSpec No Explicit default definitions. This field is mutually exclusive with any of the implicit default definitions: spec.rules, spec.patterns, spec.when overrides AuthPolicyCommonSpec No Atomic overrides definitions. This field is mutually exclusive with any of the implicit or explicit default definitions: spec.rules, spec.patterns, spec.when, spec.default"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicycommonspec","title":"AuthPolicyCommonSpec","text":"Field Type Required Description rules AuthScheme No Authentication/authorization rules patterns MapNamedPattern> No Named patterns of lists of selector, operator and value tuples, to be reused in when conditions and pattern-matching authorization rules. when []PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the policy. 
Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames and spec.rules.matches fields, or when targeting a Gateway."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authscheme","title":"AuthScheme","text":"Field Type Required Description authentication MapAuthenticationRule> No Authentication rules. At least one config MUST evaluate to a valid identity object for the auth request to be successful. If omitted or empty, anonymous access is assumed. metadata MapMetadataRule> No Rules for fetching auth metadata from external sources. authorization MapAuthorizationRule> No Authorization rules. All policies MUST allow access for the auth request be successful. response ResponseSpec No Customizations to the response to the authorization request. Use it to set custom values for unauthenticated, unauthorized, and/or success access request. callbacks MapCallbackRule> No Rules for post-authorization callback requests to external services. Triggered regardless of the result of the authorization request."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authrulecommon","title":"AuthRuleCommon","text":"Field Type Required Description when []PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the auth rule. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames and spec.rules.matches fields, or when targeting a Gateway. cache Caching spec No Caching options for the resolved object returned when applying this auth rule. (Default: disabled) priority Integer No Priority group of the auth rule. All rules in the same priority group are evaluated concurrently; consecutive priority groups are evaluated sequentially. (Default: 0) metrics Boolean No Whether the auth rule emits individual observability metrics. 
(Default: false)"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authenticationrule","title":"AuthenticationRule","text":"Field Type Required Description apiKey API Key authentication spec No Authentication based on API keys stored in Kubernetes secrets. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. kubernetesTokenReview KubernetesTokenReview spec No Authentication by Kubernetes token review. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. jwt JWT verification spec No Authentication based on JSON Web Tokens (JWT). Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. oauth2Introspection OAuth2 Token Introscpection spec No Authentication by OAuth2 token introspection. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. x509 X.509 authentication spec No Authentication based on client X.509 certificates. The certificates presented by the clients must be signed by a trusted CA whose certificates are stored in Kubernetes secrets. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. plain Plain identity object spec No Identity object extracted from the context. Use this method when authentication is performed beforehand by a proxy and the resulting object passed to Authorino as JSON in the auth request. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. anonymous Anonymous access No Anonymous access. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. credentials Auth credentials spec No Customizations to where credentials are required to be passed in the request for authentication based on this auth rule. Defaults to HTTP Authorization header with prefix \"Bearer\". overrides Identity extension spec No JSON overrides to set to the resolved identity object. 
Do not use it with identity objects of other JSON types (array, string, etc). defaults Identity extension spec No JSON defaults to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#metadatarule","title":"MetadataRule","text":"Field Type Required Description http HTTP GET/GET-by-POST external metadata spec No External source of auth metadata via HTTP request. Use one of: http, userInfo, uma. userInfo OIDC UserInfo spec No OpendID Connect UserInfo linked to an OIDC authentication rule declared in this same AuthPolicy. Use one of: http, userInfo, uma. uma UMA metadata spec No User-Managed Access (UMA) source of resource data. Use one of: http, userInfo, uma. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authorizationrule","title":"AuthorizationRule","text":"Field Type Required Description patternMatching Pattern-matching authorization spec No Pattern-matching authorization rules. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. opa OPA authorization spec No Open Policy Agent (OPA) Rego policy. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. kubernetesSubjectAccessReview Kubernetes SubjectAccessReview spec No Authorization by Kubernetes SubjectAccessReview. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. spicedb SpiceDB authorization spec No Authorization decision delegated to external Authzed/SpiceDB server. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#responsespec","title":"ResponseSpec","text":"Field Type Required Description unauthenticated Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthenticated. 
(Default: 401 Unauthorized) unauthorized Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthorized. (Default: 403 Forbidden) success SuccessResponseSpec No Response items to be included in the auth response when the request is authenticated and authorized."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponsespec","title":"SuccessResponseSpec","text":"Field Type Required Description headers Map<string:SuccessResponseItem> No Custom success response items wrapped as HTTP headers to be injected in the request. filters Map<string:SuccessResponseItem> No Custom success response items made available to other filters managed by Kuadrant (i.e. Rate Limit)."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponseitem","title":"SuccessResponseItem","text":"Field Type Required Description plain Plain text response item No Plain text content. Use one of: plain, json, wristband. json JSON injection response item No Specification of a JSON object. Use one of: plain, json, wristband. wristband Festival Wristband token response item No Specification of a JSON object. Use one of: plain, json, wristband. key String No The key used to add the custom response item (name of the HTTP header or root property of the Dynamic Metadata object). Defaults to the name of the response item if omitted."},{"location":"kuadrant-operator/doc/reference/authpolicy/#callbackrule","title":"CallbackRule","text":"Field Type Required Description http HTTP endpoints callback spec No HTTP endpoint settings to build the callback request (webhook). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#namedpattern","title":"NamedPattern","text":"Field Type Required Description selector String Yes A valid Well-known attribute whose resolved value in the data plane will be compared to value, using the operator. 
operator String Yes The binary operator to be applied to the resolved value specified by the selector. One of: eq (equal to), neq (not equal to), incl (includes; for arrays), excl (excludes; for arrays), matches (regex). value String Yes The static value to be compared to the one resolved from the selector."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicystatus","title":"AuthPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/authpolicy/#conditionspec","title":"ConditionSpec","text":"
  • The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
  • The message field is a human-readable message indicating details about the transition.
  • The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
  • The status field is a string, with possible values True, False, and Unknown.
  • The type field is a string with the following possible values:
  • Available: the resource has been successfully configured;
Field Type Description type String Condition Type status String Status: True, False, Unknown reason String Condition state reason message String Condition state description lastTransitionTime Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/authpolicy/#high-level-example","title":"High-level example","text":"
apiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: my-auth-policy\nspec:\n  # Reference to an existing networking resource to attach the policy to. REQUIRED.\n  # It can be a Gateway API HTTPRoute or Gateway resource.\n  # It can only refer to objects in the same namespace as the AuthPolicy.\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute / Gateway\n    name: myroute / mygateway\n\n  # Additional dynamic conditions to trigger the AuthPolicy.\n  # Use it for filtering attributes not supported by HTTPRouteRule or with AuthPolicies that target a Gateway.\n  # Check out https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md to learn more\n  # about the Well-known Attributes that can be used in this field.\n  # Equivalent to if otherwise declared within `defaults`.\n  when: [\u2026]\n\n  # Sets of common patterns of selector-operator-value triples, to be referred by name in `when` conditions\n  # and pattern-matching rules. 
Often employed to avoid repetition in the policy.\n  # Equivalent to if otherwise declared within `defaults`.\n  patterns: { \u2026 }\n\n  # The auth rules to apply to the network traffic routed through the targeted resource.\n  # Equivalent to if otherwise declared within `defaults`.\n  rules:\n    # Authentication rules to enforce.\n    # At least one config must evaluate to a valid identity object for the auth request to be successful.\n    # If omitted or empty, anonymous access is assumed.\n    authentication:\n      \"my-authn-rule\":\n        # The authentication method of this rule.\n        # One-of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous.\n        apiKey: { \u2026 }\n\n        # Where credentials are required to be passed in the request for authentication based on this rule.\n        # One-of: authorizationHeader, customHeader, queryString, cookie.\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n\n        # Rule-level additional conditions.\n        when: [\u2026]\n\n        # Configs for caching the resolved object returned out of evaluating this auth rule.\n        cache: { \u2026 }\n\n    # Rules for fetching auth metadata from external sources.\n    metadata:\n      \"my-external-source\":\n        # The method for fetching metadata from the external source.\n        # One-of: http: userInfo, uma.\n        http: { \u2026 }\n\n    # Authorization rules to enforce.\n    # All policies must allow access for the auth request be successful.\n    authorization:\n      \"my-authz-rule\":\n        # The authorization method of this rule.\n        # One-of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb.\n        opa: { \u2026 }\n\n    # Customizations to the authorization response.\n    response:\n      # Custom denial status and other HTTP attributes for unauthenticated requests.\n      unauthenticated: { \u2026 }\n\n      # Custom denial status and other HTTP 
attributes for unauthorized requests.\n      unauthorized: { \u2026 }\n\n      # Custom response items when access is granted.\n      success:\n        # Custom response items wrapped as HTTP headers to be injected in the request\n        headers:\n          \"my-custom-header\":\n            # One-of: plain, json, wristband.\n            plain: { \u2026 }\n\n        # Custom response items wrapped as envoy dynamic metadata.\n        dynamicMetadata:\n          # One-of: plain, json, wristband.\n          \"my-custom-dyn-metadata\":\n            json: { \u2026 }\n\n    # Rules for post-authorization callback requests to external services.\n    # Triggered regardless of the result of the authorization request.\n    callbacks:\n      \"my-webhook\":\n        http: { \u2026 }\n\n    # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on\n    # routes that lack a more specific policy attached to.\n    # Mutually exclusive with `overrides` and with declaring the `rules`, `when` and `patterns` at the top-level of\n    # the spec.\n    defaults:\n      rules:\n        authentication: { \u2026 }\n        metadata: { \u2026 }\n        authorization: { \u2026 }\n        response: { \u2026 }\n        callbacks: { \u2026 }\n      when: [\u2026]\n      patterns: { \u2026 }\n\n    # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n    # thus also overriding any more specific policy occasionally attached to any of those routes.\n    # Mutually exclusive with `defaults` and with declaring `rules`, `when` and `patterns` at the top-level of\n    # the spec.\n    overrides:\n      rules:\n        authentication: { \u2026 }\n        metadata: { \u2026 }\n        authorization: { \u2026 }\n        response: { \u2026 }\n        callbacks: { \u2026 }\n      when: [\u2026]\n      patterns: { \u2026 }\n
"},{"location":"kuadrant-operator/doc/reference/dnspolicy/","title":"The DNSPolicy Custom Resource Definition (CRD)","text":"
  • DNSPolicy
  • DNSPolicySpec
  • excludeAddresses
  • ProviderRefs
  • HealthCheckSpec
  • LoadBalancingSpec
    • LoadBalancingWeighted
    • CustomWeight
    • LoadBalancingGeo
  • DNSPolicyStatus
  • HealthCheckStatus
"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicy","title":"DNSPolicy","text":"Field Type Required Description spec DNSPolicySpec Yes The specification for DNSPolicy custom resource status DNSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicyspec","title":"DNSPolicySpec","text":"Field Type Required Description targetRef Gateway API LocalPolicyTargetReferenceWithSectionName Yes Reference to a Kubernetes resource that the policy attaches to healthCheck HealthCheckSpec No HealthCheck spec loadBalancing LoadBalancingSpec No LoadBalancing Spec providerRefs ProviderRefs Yes array of references to providers. (currently limited to max 1)"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#providerrefs","title":"ProviderRefs","text":"Field Type Required Description providerRefs []ProviderRef Yes max 1 reference. This is an array of providerRef that points to a local secret(s) that contains the required provider auth values"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#providerref","title":"ProviderRef","text":"Field Type Required Description name String Yes Name of the secret in the same namespace that contains the provider credentials"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#excludeaddresses","title":"ExcludeAddresses","text":"Field Type Required Description excludeAddresses []String No set of hostname, CIDR or IP Addresses to exclude from the DNS Provider"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description name String Yes Name of the secret in the same namespace that contains the provider credentials -------------------- ------------ :------------: ----------------------------------------------------------------------------------------------------------- path String Yes Path is the path to append to the host to reach the expected health check. 
Must start with \"?\" or \"/\", contain only valid URL characters and end with alphanumeric char or \"/\". For example \"/\" or \"/healthz\" are common port Number Yes Port to connect to the host on. Must be either 80, 443 or 1024-49151 protocol String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy interval Duration Yes Interval defines how frequently this probe should execute additionalHeadersRef String No AdditionalHeadersRef refers to a secret that contains extra headers to send in the probe request, this is primarily useful if an authentication token is required by the endpoint. allowInsecureCertificate Boolean No AllowInsecureCertificate will instruct the health check probe to not fail on a self-signed or otherwise invalid SSL certificate this is primarily used in development or testing environments"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancingspec","title":"LoadBalancingSpec","text":"Field Type Required Description defaultGeo Boolean Yes Specifies if this is the default geo geo String Yes Geo value to apply to geo endpoints weight Number No Weight value to apply to weighted endpoints default: 120"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicystatus","title":"DNSPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the resource. healthCheck HealthCheckStatus HealthCheck status. 
recordConditions [String][]Kubernetes meta/v1.Condition Status of individual DNSRecords owned by this policy."},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/dnspolicy/#high-level-example","title":"High-level example","text":"
apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: my-dns-policy\nspec:\n  # reference to an existing networking resource to attach the policy to\n  # it can only be a Gateway API Gateway resource\n  # it can only refer to objects in the same namespace as the DNSPolicy\n  # it can target a specific listener using sectionName\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: mygateway\n    sectionName: api # (optional) if not set policy applies to all listeners that do not have a policy attached directly\n\n  # reference to an existing secret resource containing provider credentials and configuration\n  # it can only refer to Secrets in the same namespace as the DNSPolicy that have the type kuadrant.io/(provider) e.g kuadrant.io/aws\n  providerRefs:\n\n    - name: my-aws-credentials\n\n  # (optional) loadbalancing specification\n  # use it for providing the specification of how dns will be configured in order to provide balancing of requests across multiple clusters. If not configured, a simple A or CNAME record will be created. If you have a policy with no loadbalancing defined and want to move to a loadbalanced configuration, you will need to delete and re-create the policy.\n  loadBalancing:\n    # is this the default geo to be applied to records. It is important that you set the default geo flag to true **Only** for the GEO value you wish to act as the catchall GEO, you should not set multiple GEO values as default for a given targeted listener. Example: policy 1 targets listener 1 with a geo of US and sets default to true. Policy 2 targets a listener on another cluster and set the geo to EU and default to false. It is fine for policies in the same default GEO to set the value to true. The main thing is to have only one unique GEO set as the default for any shared listener hostname.\n    defaultGeo: true\n    # weighted specification. 
This will apply the given weight to the records created based on the targeted gateway listeners. If you have multiple gateways that share a listener host, you can set different weight values to influence how much traffic will be brought to a given gateway.\n    weight: 100\n    # This is the actual GEO location to set for records created by this policy. This can and should be different if you have multiple gateways across multiple geographic areas.\n\n    # AWS: To see all regions supported by AWS Route 53, please see the official [documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-geo.html). With Route 53 when setting a continent code use a \"GEO-\" prefix otherwise it will be considered a country code.\n\n    # GCP: To see all regions supported by GCP Cloud DNS, please see the official [documentation](https://cloud.google.com/compute/docs/regions-zones)\n\n    #To see the different values you can use for the geo based DNS with Azure take a look at the following [documentation](https://learn.microsoft.com/en-us/azure/traffic-manager/traffic-manager-geographic-regions)\n    geo: IE\n\n  # (optional) health check specification\n  # health check probes with the following specification will be created for each DNS target, these probes constantly check that the endpoint can be reached. They will flag an unhealthy endpoint in the status. If no DNSRecord has yet been published and the endpoint is unhealthy, the record will not be published until the health check passes.\n  healthCheck:\n    # the path on the listener host(s) that you want to check.\n    path: /health\n    # how many times does the health check need to fail before unhealthy.\n    failureThreshold: 3\n    # how often should it be checked.\n    interval: 5min\n    # additionalHeadersRef is reference to a local secret with a set of key value pairs to be used as headers when sending the health check request.\n    additionalHeadersRef:\n      name: headers\n
"},{"location":"kuadrant-operator/doc/reference/kuadrant/","title":"The Kuadrant Custom Resource Definition (CRD)","text":""},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrant","title":"kuadrant","text":"Field Type Required Description spec KuadrantSpec No Blank specification status KuadrantStatus No The status for the custom resources."},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantspec","title":"KuadrantSpec","text":"

Currently blank specification.

"},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantstatus","title":"KuadrantStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/","title":"The RateLimitPolicy Custom Resource Definition (CRD)","text":""},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicy","title":"RateLimitPolicy","text":"Field Type Required Description spec RateLimitPolicySpec Yes The specification for RateLimitPolicy custom resource status RateLimitPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicyspec","title":"RateLimitPolicySpec","text":"Field Type Required Description targetRef LocalPolicyTargetReferenceWithSectionName Yes Reference to a Kubernetes resource that the policy attaches to. For more info defaults RateLimitPolicyCommonSpec No Default limit definitions. This field is mutually exclusive with the limits field overrides RateLimitPolicyCommonSpec No Overrides limit definitions. This field is mutually exclusive with the limits field and defaults field. This field is only allowed for policies targeting Gateway in targetRef.kind limits MapLimit> No Limit definitions. This field is mutually exclusive with the defaults field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#localpolicytargetreferencewithsectionname","title":"LocalPolicyTargetReferenceWithSectionName","text":"Field Type Required Description LocalPolicyTargetReference LocalPolicyTargetReference Yes Reference to a local policy target. 
sectionName SectionName No Section name for further specificity (if needed)."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#localpolicytargetreference","title":"LocalPolicyTargetReference","text":"Field Type Required Description group Group Yes Group of the target resource. kind Kind Yes Kind of the target resource. name ObjectName Yes Name of the target resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#sectionname","title":"SectionName","text":"Field Type Required Description SectionName v1.SectionName (String) Yes SectionName is the name of a section in a Kubernetes resource. In the following resources, SectionName is interpreted as the following: Gateway: Listener name HTTPRoute: HTTPRouteRule name* Service: Port name ### RateLimitPolicyCommonSpec Field Type Required Description when []Predicate No List of dynamic predicates to activate the policy. All expression must evaluate to true for the policy to be applied limits MapLimit> No Explicit Limit definitions. This field is mutually exclusive with RateLimitPolicySpec limits field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#predicate","title":"Predicate","text":"Field Type Required Description predicate String Yes Defines one CEL expression that must be evaluated to bool"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#counter","title":"Counter","text":"Field Type Required Description expression String Yes Defines one CEL expression that will be used as rate limiting counter"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#limit","title":"Limit","text":"Field Type Required Description rates []RateLimit No List of rate limits associated with the limit definition counters []Counter No List of rate limit counter qualifiers. Items must be a valid Well-known attribute. Each distinct value resolved in the data plane starts a separate counter for each rate limit. 
when []Predicate No List of dynamic predicates to activate the limit. All expression must evaluate to true for the limit to be applied"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimit","title":"RateLimit","text":"Field Type Required Description limit Number Yes Maximum value allowed within the given period of time (duration) window String Yes The period of time that the limit applies. Follows Gateway API Duration format"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicystatus","title":"RateLimitPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#conditionspec","title":"ConditionSpec","text":"
  • The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
  • The message field is a human-readable message indicating details about the transition.
  • The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
  • The status field is a string, with possible values True, False, and Unknown.
  • The type field is a string with the following possible values:
    • Available: the resource has been successfully configured;
Field Type Description type String Condition Type status String Status: True, False, Unknown reason String Condition state reason message String Condition state description lastTransitionTime Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#high-level-example","title":"High-level example","text":"
apiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: my-rate-limit-policy\nspec:\n  # Reference to an existing networking resource to attach the policy to. REQUIRED.\n  # It can be a Gateway API HTTPRoute or Gateway resource.\n  # It can only refer to objects in the same namespace as the RateLimitPolicy.\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute / Gateway\n    name: myroute / mygateway\n\n  # The limits definitions to apply to the network traffic routed through the targeted resource.\n  # Equivalent to if otherwise declared within `defaults`.\n  limits:\n    \"my_limit\":\n      # The rate limits associated with this limit definition. REQUIRED.\n      # E.g., to specify a 50rps rate limit, add `{ limit: 50, duration: 1, unit: secod }`\n      rates: [\u2026]\n\n      # Counter qualifiers.\n      # Each dynamic value in the data plane starts a separate counter, combined with each rate limit.\n      # E.g., to define a separate rate limit for each user name detected by the auth layer, add `metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.username`.\n      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n      counters: [\u2026]\n\n      # Additional dynamic conditions to trigger the limit.\n      # Use it for filtering attributes not supported by HTTPRouteRule or with RateLimitPolicies that target a Gateway.\n      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n      when: [\u2026]\n\n    # Explicit defaults. 
Used in policies that target a Gateway object to express default rules to be enforced on\n    # routes that lack a more specific policy attached to.\n    # Mutually exclusive with `overrides` and with declaring `limits` at the top-level of the spec.\n    defaults:\n      limits: { \u2026 }\n\n    # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n    # thus also overriding any more specific policy occasionally attached to any of those routes.\n    # Mutually exclusive with `defaults` and with declaring `limits` at the top-level of the spec.\n    overrides:\n      limits: { \u2026 }\n
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/","title":"The TLSPolicy Custom Resource Definition (CRD)","text":"
  • TLSPolicy
  • TLSPolicySpec
  • TLSPolicyStatus
"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicy","title":"TLSPolicy","text":"Field Type Required Description spec TLSPolicySpec Yes The specification for TLSPolicy custom resource status TLSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicyspec","title":"TLSPolicySpec","text":"Field Type Required Description targetRef Gateway API LocalPolicyTargetReference Yes Reference to a Kuberentes resource that the policy attaches to issuerRef CertManager meta/v1.ObjectReference Yes IssuerRef is a reference to the issuer for the created certificate commonName String No CommonName is a common name to be used on the created certificate duration Kubernetes meta/v1.Duration No The requested 'duration' (i.e. lifetime) of the created certificate. renewBefore Kubernetes meta/v1.Duration No How long before the currently issued certificate's expiry cert-manager should renew the certificate. usages []CertManager v1.KeyUsage No Usages is the set of x509 usages that are requested for the certificate. Defaults to digital signature and key encipherment if not specified revisionHistoryLimit Number No RevisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history privateKey CertManager meta/v1.CertificatePrivateKey No Options to control private keys used for the Certificate

IssuerRef certmanmetav1.ObjectReference

"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicystatus","title":"TLSPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/","title":"Enforcing authentication & authorization with Kuadrant AuthPolicy","text":"

This guide walks you through the process of setting up a local Kubernetes cluster with Kuadrant where you will protect Gateway API endpoints by declaring Kuadrant AuthPolicy custom resources.

Three AuthPolicies will be declared:

Use case AuthPolicies App developer 2 AuthPolicies targeting a HTTPRoute that routes traffic to a sample \"Toy Store\" application \u2192 enforce API key authentication to all requests in this route; require API key owners to be mapped to groups:admins metadata to access a specific HTTPRouteRule of the route. Platform engineer use-case 1 AuthPolicy targeting the kuadrant-ingressgateway Gateway \u2192 enforces a trivial \"deny-all\" policy that locks down any other HTTPRoute attached to the Gateway.

Topology:

                            \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                            \u2502        (Gateway)        \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                            \u2502 kuadrant-ingressgateway \u2502\u25c4\u2500\u2500\u2502 (AuthPolicy)  \u2502\n                            \u2502                         \u2502   \u2502    gw-auth    \u2502\n                            \u2502            *            \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                            \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                              \u25b2                      \u25b2\n                     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502   (HTTPRoute)    \u2502   \u2502   (HTTPRoute)    \u2502\n\u2502  (AuthPolicy)  \u2502\u2500\u2500\u25ba\u2502    toystore      \u2502   \u2502      other       \u2502\n\u2502 toystore-authn \u2502   \u2502                  \u2502   \u2502                  \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   \u2502 api.toystore.com \u2502   \u2502 *.other-apps.com \u2502\n                     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                      \u25b2                \u25b2\n            \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n            | (HTTPRouteRule) | | (HTTPRouteRule) |   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n            |     rule-1      | |     rule-2      |\u25c4\u2500\u2500\u2502   (AuthPolicy)  \u2502\n            |                 | |                 |   \u2502 toystore-admins \u2502\n            | - GET /cars*    | | - /admins*      |   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n            | - GET /dolls*   | \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n            \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#deploy-the-toy-store-sample-application-persona-app-developer","title":"Deploy the Toy Store sample application (Persona: App developer)","text":"
kubectl apply -f examples/toystore/toystore.yaml\n\nkubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches: # rule-1\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/cars\"\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/dolls\"\n    backendRefs:\n    - name: toystore\n      port: 80\n  - matches: # rule-2\n    - path:\n        type: PathPrefix\n        value: \"/admin\"\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

Export the gateway hostname and port:

export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n

Send requests to the application unprotected:

curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/dolls -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#protect-the-toy-store-application-persona-app-developer","title":"Protect the Toy Store application (Persona: App developer)","text":"

Create AuthPolicies to enforce the following auth rules:

  • Authentication:
  • All users must present a valid API key
  • Authorization:
  • /admin* paths (2nd rule of the HTTPRoute) require user mapped to the admins group (kuadrant.io/groups=admins annotation added to the Kubernetes API key Secret)
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-authn\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  defaults:\n    strategy: merge\n    rules:\n      authentication:\n        \"api-key-authn\":\n          apiKey:\n            selector:\n              matchLabels:\n                app: toystore\n          credentials:\n            authorizationHeader:\n              prefix: APIKEY\n---\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-admins\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-2\n  rules:\n    authorization:\n      \"only-admins\":\n        opa:\n          rego: |\n            groups := split(object.get(input.auth.identity.metadata.annotations, \"kuadrant.io/groups\", \"\"), \",\")\n            allow { groups[_] == \"admins\" }\nEOF\n

Create the API keys (must be created in the same namespace as the Kuadrant CR):

kubectl apply -n kuadrant-system -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-regular-user\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\nstringData:\n  api_key: iamaregularuser\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-admin-user\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    kuadrant.io/groups: admins\nstringData:\n  api_key: iamanadmin\ntype: Opaque\nEOF\n

Send requests to the application protected by Kuadrant:

curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-authn\"\n# x-ext-auth-reason: credential not found\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamanadmin' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#create-a-default-deny-all-policy-at-the-level-of-the-gateway-persona-platform-engineer","title":"Create a default \"deny-all\" policy at the level of the gateway (Persona: Platform engineer)","text":"

Create the policy:

kubectl -n gateway-system apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: gw-auth\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: kuadrant-ingressgateway\n  defaults:\n    strategy: atomic\n    rules:\n      authorization:\n        deny-all:\n          opa:\n            rego: \"allow = false\"\n      response:\n        unauthorized:\n          headers:\n            \"content-type\":\n              value: application/json\n          body:\n            value: |\n              {\n                \"error\": \"Forbidden\",\n                \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n              }\nEOF\n

The policy won't be effective until there is at least one accepted route not yet protected by another more specific policy attached to it.

Create a route that will inherit the default policy attached to the gateway:

kubectl apply -f -<<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: other\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - \"*.other-apps.com\"\nEOF\n

Send requests to the route protected by the default policy set at the level of the gateway:

curl -H 'Host: foo.other-apps.com' http://$GATEWAY_URL/ -i\n# HTTP/1.1 403 Forbidden\n# content-type: application/json\n# x-ext-auth-reason: Unauthorized\n# [\u2026]\n#\n# {\n#   \"error\": \"Forbidden\",\n#   \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n# }\n
"},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/","title":"Basic DNS","text":""},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#basic-dns-setup","title":"Basic DNS setup","text":"

The document will cover the most basic DNS setup using the Kuadrant DNSPolicy API. In order to follow this guide, it is expected that you have a cluster setup with the latest version of Kuadrant installed. Also as we are using DNS, it is also important that the Gateways are accessible either via your local network or via the public internet. DNSPolicy will work with any Gateway provider so it is not essential that you have Istio or Envoy Gateway installed, but you do need a Gateway API provider installed. We would recommend using Istio or Envoy Gateway as this will allow you to use some of the other policies provided by Kuadrant.

"},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#gateway-and-httproute-configuration","title":"Gateway and HTTPRoute configuration","text":"

With a Gateway provider installed, in order to configure DNS via DNSPolicy, you must first configure a Gateway with a listener that uses a specified hostname. You must also have a HTTPRoute resource attached to this gateway listener. Below are some simple examples of these resources (note we are not using a HTTPS listener for simplicity but that will also work):

---\nkind: Gateway\napiVersion: gateway.networking.k8s.io/v1\nmetadata:\n  name: external\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - name: http\n      port: 8080\n      hostname: test.example.com\n      protocol: HTTP\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\n  labels:\n    app: toystore\nspec:\n  parentRefs:\n    - name: external\n  hostnames: [\"test.example.com\"]\n  rules:\n    - matches:\n        - path:\n            type: PathPrefix\n            value: \"/toy\"\n          method: GET\n        - path:\n            type: Exact\n            value: \"/admin/toy\"\n          method: POST\n        - path:\n            type: Exact\n            value: \"/admin/toy\"\n          method: DELETE\n      backendRefs:\n        - name: toystore\n          port: 80\n
With these defined, we are ready to setup DNS via DNSPolicy.

"},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#configure-a-dnsprovider","title":"Configure a DNSProvider","text":"

The first step is to configure a DNSProvider. This is a simple kubernetes secret with credentials to access the DNS provider. With Kuadrant we support using AWS Route53, Azure and GCP as DNS providers. It is important that this credential has access to write and read to your DNS zones.

More info on the various DNS Providers

In this example we will configure an AWS route53 DNS provider:

kubectl create secret generic aws-credentials \\\n  --namespace=my-gateway-namespace \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=XXXX \\\n  --from-literal=AWS_REGION=eu-west-1 \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=XXX\n

With this in place we can now define our DNSPolicy resource:

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: basic-dnspolicy\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external\n  providerRefs:\n\n    - name: aws-credentials\n

This resource also needs to be created in the same namespace as your Gateway and the targetRef needs to reference your gateway. When this is done we can check the status of the DNSPolicy and the Gateway to check when it is ready.

kubectl wait dnspolicy/basic-dnspolicy -n my-gateway-namespace --for=\"condition=Ready=true\" --timeout=300s\n

If you look at the gateway status you should also see:

  - lastTransitionTime: \"2024-10-09T11:22:10Z\"\n    message: Object affected by DNSPolicy kuadrant-system/simple-dnspolicy\n    observedGeneration: 1\n    reason: Accepted\n    status: \"True\"\n    type: kuadrant.io/DNSPolicyAffected\n

DNS is now set up for your Gateway. After allowing a little time for the DNS to propagate to the nameservers, you should be able to test the DNS using a dig command; alternatively, you can curl your endpoint.

dig test.example.com +short\n\ncurl -v test.example.com/toy\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#important-considerations","title":"Important Considerations","text":"

With this guide, you have learned how to setup the most basic DNSPolicy. DNSPolicy is also capable of setting up advanced DNS record structure to help balance traffic across multiple gateways. With the most basic policy outlined here, you should not apply it to more than one gateway that shares a listener with the same host name. There is one exception to this rule, which is if all your gateways are using IP addresses rather than hostname addresses; in this case DNSPolicy will merge the IPs into a multi-value response. However, if your Gateways are using hostnames, DNSPolicy will set up a simple CNAME record and as there is only one record and CNAMEs cannot have multiple values by definition, one of the DNSPolicies (the last one to attempt to update the provider) will report an error.

"},{"location":"kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/","title":"Dns excluding specific addresses","text":""},{"location":"kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/#excluding-specific-addresses-from-being-published","title":"Excluding specific addresses from being published","text":"

By default DNSPolicy takes all the addresses published in the status of the Gateway it is targeting and uses these values in the DNSRecord it publishes to the chosen DNS provider.

There could be cases where you have an address assigned to a gateway that you do not want to publish to a DNS provider, but you still want DNSPolicy to publish records for other addresses.

To prevent a gateway address being published to the DNS provider, you can set the excludeAddresses field in the DNSPolicy resource targeting the gateway. The excludeAddresses field can be set to a hostname, an IPAddress or a CIDR.

Below is an example of a DNSPolicy excluding a hostname:

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: prod-web\n  namespace: ${DNSPOLICY_NAMESPACE}\nspec:\n  targetRef:\n    name: prod-web-istio\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  providerRefs:\n\n    - name: aws-credentials\n  loadBalancing:\n    weight: 120\n    geo: EU\n    defaultGeo: true\n  excludeAddresses:\n    - \"some.local.domain\"\n

In the above case some.local.domain will not be set up as a CNAME record in the DNS provider.

Note: It is valid to exclude all addresses. However this will result in existing records being removed and no new ones being created.

"},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/","title":"DNS Health Checks","text":"

The DNS health check feature allows you to define a HTTP based health check via the DNSPolicy API that will be executed against targeted gateway listener(s) that have specified non-wildcard hostnames. These health checks will flag a published endpoint as healthy or unhealthy based on the defined configuration. When unhealthy, an endpoint will not be published if it has not already been published to the DNS provider, will only be unpublished if it is part of a multi-value A record, and in all cases can be observed via the DNSPolicy status.

"},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#limitations","title":"Limitations","text":"
  • We do not currently support a health check being targeted to a HTTPRoute resource: DNSPolicy can only target Gateways.
  • As mentioned above, when a record has been published using the load balancing options (GEO and Weighting) via DNSPolicy, a failing health check will not remove the endpoint record from the provider, this is to avoid an accidental NX-Domain response. If the policy is not using the load balancing options and results in a multiple value A record, then unhealthy IPs will be removed from this A record unless it would result in an empty value set.
  • Health checks will not be added to listeners that define a wildcard hostname, e.g. (*.example.com), as we currently cannot know which host to use for the health check.
"},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#configuration-of-health-checks","title":"Configuration of Health Checks","text":"

To configure a DNS health check, you need to specify the health check section of the DNSPolicy.

Below are some examples of DNSPolicy with health checks defined:

1) DNSPolicy with a health check that will be applied to all listeners on a gateway that define a non-wildcard hostname

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: gateway-dns\nspec:\n  healthCheck:\n    failureThreshold: 3\n    interval: 5m\n    path: /health\n  ...\n   targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external  \n

2) DNSPolicy with a health check that will be applied for a specific listener with a non-wildcard hostname

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: my-listener-dns\nspec:\n  healthCheck:\n    failureThreshold: 3\n    interval: 5m\n    path: /ok #different path for this listener\n  ...\n   targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external  \n    sectionName: my-listener #notice the addition of section name here that must match the listener name\n

These policies can be combined on a single gateway. The policy with the section name defined will override the gateway policy including the health check.

"},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#sending-additional-headers-with-the-health-check-request","title":"Sending additional headers with the health check request","text":"

Sometimes, it may be desirable to send some additional headers with the health check request. For example, to send an API key or service account token in the request headers.

To do this you will need to create a secret in the same namespace as the DNSPolicy with the keys and values you wish to send:

kubectl create secret generic healthheaders --from-literal=token=supersecret -n my-dns-policy-namespace\n

Next you will need to update the DNSPolicy to add a reference to this secret:

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: my-listener-dns\nspec:\n  healthCheck:\n    additionalHeadersRef: #add the following\n      name: healthheaders\n    failureThreshold: 3\n    interval: 5m\n    path: /ok\n  ...\n   targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external  \n    sectionName: my-listener\n

The health check requests will now send the key value pairs in the secret as headers when performing a health check request.

"},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#health-check-status","title":"Health Check Status","text":"

When all health checks based on a DNSPolicy are passing you will see the following status:

    - lastTransitionTime: \"2024-11-14T12:33:13Z\"\n      message: All sub-resources are healthy\n      reason: SubResourcesHealthy\n      status: \"True\"\n      type: SubResourcesHealthy\n

If one or more of the health checks are failing you will see a status in the DNSPolicy similar to the one shown below:

   - lastTransitionTime: \"2024-11-15T10:40:15Z\"\n      message: 'DNSPolicy has encountered some issues: not all sub-resources of policy\n        are passing the policy defined health check. Not healthy DNSRecords are: external-t1b '\n      reason: Unknown\n      status: \"False\"\n      type: SubResourcesHealthy\n    observedGeneration: 1\n    recordConditions:\n      t1b.cb.hcpapps.net:\n      - lastTransitionTime: \"2024-11-15T10:40:14Z\"\n        message: 'Not healthy addresses: [aeeba26642f1b47d9816297143e2d260-434484576.eu-west-1.elb.amazonaws.com]'\n        observedGeneration: 1\n        reason: health checksFailed\n        status: \"False\"\n        type: Healthy\n

Finally, you can also take a look at the underlying individual health check status by inspecting the dnshealthcheckprobe resource:

Note: These resources are for view only interactions as they are controlled by the Kuadrant Operator based on the DNSPolicy API

kubectl get dnshealthcheckprobes -n my-dns-policy-namespace -o=wide\n

If you look at the status of one of these you can see additional information:

status:\n  consecutiveFailures: 3\n  healthy: false\n  observedGeneration: 1\n  reason: 'Status code: 503'\n  status: 503\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#manually-removing-unhealthy-records","title":"Manually removing unhealthy records","text":"

If you have a failing health check for one of your gateway listeners and you would like to remove it from the DNS provider, you can do this by deleting the associated DNSRecord resource.

Finding the correct record

DNSRecord resources are kept in the same namespace as the DNSPolicy that configured and created them.

kubectl get dnsrecords.kuadrant.io -n <dns-policy-namespace>\n

As shown above, when a health check is failing, the DNSPolicy will show a status for that listener host to surface that failure:

recordConditions:\n    t1a.cb.hcpapps.net:\n\n    - lastTransitionTime: \"2024-11-27T14:00:52Z\"\n      message: 'Not healthy addresses: [ae4d131ee5d7b4fb098f4afabf4aba4c-513237325.us-east-1.elb.amazonaws.com]'\n      observedGeneration: 1\n      reason: HealthChecksFailed\n      status: \"False\"\n      type: Healthy\n

The DNSRecord resource is named after the gateway and the listener name. So if you have a gateway called ingress and a listener called example you will have a DNSRecord resource named ingress-example in the same namespace as your DNSPolicy. So from this status you can get the hostname and find the associated listener on your gateway. You can then delete the associated DNSRecord resource.

kubectl delete dnsrecord.kuadrant.io <gateway-name>-<listener-name> -n <dns policy namespace>\n

Removing this resource will remove all of the associated DNS records in the DNS provider and while the health check is failing, the dns operator will not re-publish these records.

"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/","title":"Gateway DNS configuration for routes attached to a ingress gateway","text":"

This user guide walks you through an example of how to configure DNS for all routes attached to an ingress gateway.

"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#requisites","title":"Requisites","text":"
  • Docker
  • Route53 Hosted Zone
"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

Create a namespace:

kubectl create namespace my-gateways\n

Export a root domain and hosted zone id:

export ROOT_DOMAIN=<ROOT_DOMAIN>\n

Note: ROOT_DOMAIN should be set to your AWS hosted zone name.

"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#create-a-dns-provider-secret","title":"Create a dns provider secret","text":"

Create AWS provider secret. You should limit the permissions of this credential to only the zones you want us to access.

export AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY>\n\nkubectl -n my-gateways create secret generic aws-credentials \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"

Create a gateway using your ROOT_DOMAIN as part of a listener hostname:

kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: prod-web\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All\n      name: api\n      hostname: \"*.$ROOT_DOMAIN\"\n      port: 80\n      protocol: HTTP\nEOF\n

Check gateway status:

kubectl get gateway prod-web -n my-gateways\n

Response:

NAME       CLASS   ADDRESS        PROGRAMMED   AGE\nprod-web   istio   172.18.200.1   True         25s\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#enable-dns-on-the-gateway","title":"Enable DNS on the gateway","text":"

Create a Kuadrant DNSPolicy to configure DNS:

kubectl -n my-gateways apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: prod-web\nspec:\n  targetRef:\n    name: prod-web\n    group: gateway.networking.k8s.io\n    kind: Gateway\nEOF\n

Check policy status:

kubectl get dnspolicy -o wide -n my-gateways\n

Response:

NAME       STATUS     TARGETREFKIND   TARGETREFNAME   AGE\nprod-web   Accepted   Gateway         prod-web        26s\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#deploy-a-sample-api-to-test-dns","title":"Deploy a sample API to test DNS","text":"

Deploy the sample API:

kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n

Route traffic to the API from our gateway:

kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: prod-web\n    namespace: my-gateways\n  hostnames:\n  - \"*.$ROOT_DOMAIN\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

Verify a DNSRecord resource is created:

kubectl get dnsrecords -n my-gateways\nNAME           READY\nprod-web-api   True\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#verify-dns-works-by-sending-requests","title":"Verify DNS works by sending requests","text":"

Verify DNS using dig:

dig foo.$ROOT_DOMAIN +short\n

Response:

172.18.200.1\n

Verify DNS using curl:

curl http://api.$ROOT_DOMAIN\n

Response:

{\n  \"method\": \"GET\",\n  \"path\": \"/\",\n  \"query_string\": null,\n  \"body\": \"\",\n  \"headers\": {\n    \"HTTP_HOST\": \"api.$ROOT_DOMAIN\",\n    \"HTTP_USER_AGENT\": \"curl/7.85.0\",\n    \"HTTP_ACCEPT\": \"*/*\",\n    \"HTTP_X_FORWARDED_FOR\": \"10.244.0.1\",\n    \"HTTP_X_FORWARDED_PROTO\": \"http\",\n    \"HTTP_X_ENVOY_INTERNAL\": \"true\",\n    \"HTTP_X_REQUEST_ID\": \"9353dd3d-0fe5-4404-86f4-a9732a9c119c\",\n    \"HTTP_X_ENVOY_DECORATOR_OPERATION\": \"toystore.my-gateways.svc.cluster.local:80/*\",\n    \"HTTP_X_ENVOY_PEER_METADATA\": \"ChQKDkFQUF9DT05UQUlORVJTEgIaAAoaCgpDTFVTVEVSX0lEEgwaCkt1YmVybmV0ZXMKHQoMSU5TVEFOQ0VfSVBTEg0aCzEwLjI0NC4wLjIyChkKDUlTVElPX1ZFUlNJT04SCBoGMS4xNy4yCtcBCgZMQUJFTFMSzAEqyQEKIwoVaXN0aW8uaW8vZ2F0ZXdheS1uYW1lEgoaCHByb2Qtd2ViChkKDGlzdGlvLmlvL3JldhIJGgdkZWZhdWx0CjMKH3NlcnZpY2UuaXN0aW8uaW8vY2Fub25pY2FsLW5hbWUSEBoOcHJvZC13ZWItaXN0aW8KLwojc2VydmljZS5pc3Rpby5pby9jYW5vbmljYWwtcmV2aXNpb24SCBoGbGF0ZXN0CiEKF3NpZGVjYXIuaXN0aW8uaW8vaW5qZWN0EgYaBHRydWUKGgoHTUVTSF9JRBIPGg1jbHVzdGVyLmxvY2FsCigKBE5BTUUSIBoecHJvZC13ZWItaXN0aW8tYzU0NWQ4ZjY4LTdjcjg2ChoKCU5BTUVTUEFDRRINGgtteS1nYXRld2F5cwpWCgVPV05FUhJNGktrdWJlcm5ldGVzOi8vYXBpcy9hcHBzL3YxL25hbWVzcGFjZXMvbXktZ2F0ZXdheXMvZGVwbG95bWVudHMvcHJvZC13ZWItaXN0aW8KFwoRUExBVEZPUk1fTUVUQURBVEESAioACiEKDVdPUktMT0FEX05BTUUSEBoOcHJvZC13ZWItaXN0aW8=\",\n    \"HTTP_X_ENVOY_PEER_METADATA_ID\": \"router~10.244.0.22~prod-web-istio-c545d8f68-7cr86.my-gateways~my-gateways.svc.cluster.local\",\n    \"HTTP_X_ENVOY_ATTEMPT_COUNT\": \"1\",\n    \"HTTP_X_B3_TRACEID\": \"d65f580db9c6a50c471cdb534771c61a\",\n    \"HTTP_X_B3_SPANID\": \"471cdb534771c61a\",\n    \"HTTP_X_B3_SAMPLED\": \"0\",\n    \"HTTP_VERSION\": \"HTTP/1.1\"\n  },\n  \"uuid\": \"0ecb9f84-db30-4289-a3b8-e22d4021122f\"\n}\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/","title":"Load Balanced DNS","text":""},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#overview","title":"Overview","text":"

This document will show you how to set up a load balanced DNS configuration using the [DNSPolicy](https://docs.kuadrant.io/latest/kuadrant-operator/doc/reference/dnspolicy/) API. When we say \"load balanced\", this means we configure the DNS provider (AWS, GCP etc.) to return different gateway/loadbalancer addresses to queries from DNS clients based on specific weighting and geo location configuration.

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#when-should-i-use-a-load-balanced-dns-policy","title":"When should I use a load balanced DNS policy?","text":"

It is most useful to use the load balancing options when targeting multiple gateways that share a listener host, e.g. (api.example.com). It is also perfectly valid to use it when you only have a single gateway; this provides the benefit of allowing you to easily expand beyond this single gateway for a given shared hostname. It is worth knowing that the load balanced DNSPolicy comes with a relatively small additional cost of some added records and lookups during DNS resolution vs a \"simple\" DNSPolicy with no load balancing specified, as the latter only sets up a simple A or CNAME record. So in summary, if you expect to need multiple gateways for a given listener host then you should take advantage of the load balanced option.

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#important-considerations","title":"Important Considerations","text":"
  • When using a DNSPolicy with a load balanced configuration, all DNSPolicies affecting a listener with the same hostname should have load balanced options set. Without the load balanced configuration, Kuadrant's dns controller will try to set up only a simple A or CNAME record.
  • When setting geographic configuration, only ever set one unique GEO as the default GEO across all instances of DNSPolicy targeting a listener with the same hostname. If you set different defaults for a single listener hostname, the dns controllers will constantly attempt to bring the default into the state they each feel is correct.
  • If you want different load balancing options for a particular listener in a gateway, you can target that listener directly with DNSPolicy via the targetRef sectionName property.
  • If you do not use the load balanced configuration, a simple single A or CNAME record is set up. Later if you need to move to load balanced, you will need to delete and recreate your policy.
"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#dns-provider-setup","title":"DNS Provider Setup","text":"

A DNSPolicy acts against a target Gateway or a target listener within a gateway by processing the hostnames on the targeted listeners. Using these it can create dns records using the address exposed in the Gateway's status block. In order for Kuadrant's DNS component to do this, it must be able to access and know which DNS provider to use. This is done through the creation of a dns provider secret containing the needed credentials and the provider identifier.

[Learn more about how to set up a DNS Provider](https://docs.kuadrant.io/latest/dns-operator/docs/provider/)

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#loadbalanced-dnspolicy-creation-and-attachment","title":"LoadBalanced DNSPolicy creation and attachment","text":"

Once an appropriate provider credential is configured, we can now create and attach a DNSPolicy to start managing DNS for the listeners on our Gateway. Below is an example.

apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: prod-web\n  namespace: ingress-gateway\nspec:\n  targetRef:\n    name: prod-web\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    sectionName: listenerName \n  providerRef:\n    name: my-aws-credentials \n  loadBalancing:\n    weight: 120 \n    geo: GEO-EU \n    defaultGeo: true\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#load-balancing-section","title":"Load Balancing section","text":"

This section must be filled out and indicates to the dns component that the targets of this policy should be setup to handle more than one gateway. It is required to define values for the weighted and geo options. These values are used for the records created by the policy controller based on the target gateway. To read more detail about each of the fields in the loadbalanced section take a look at DNS Overview

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#locations-supported-per-dns-provider","title":"Locations supported per DNS provider","text":"Supported AWS GCP Continents Country codes States Regions"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#continents-and-country-codes-supported-by-aws-route-53","title":"Continents and country codes supported by AWS Route 53","text":"

Note: For more information please see the official AWS documentation

To see all regions supported by AWS Route 53, please see the official [documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-geo.html). With Route 53, when setting a continent code, use a \"GEO-\" prefix; otherwise it will be considered a country code.

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#regions-supported-by-gcp-cloud-dns","title":"Regions supported by GCP Cloud DNS","text":"

To see all regions supported by GCP Cloud DNS, please see the official [documentation](https://cloud.google.com/compute/docs/regions-zones)

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#regions-and-countries-supported-by-azure-cloud-dns","title":"Regions and Countries supported by Azure Cloud DNS","text":"

To see the different values you can use for geo based DNS with Azure, take a look at the following [documentation](https://learn.microsoft.com/en-us/azure/traffic-manager/traffic-manager-geographic-regions)

"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#moving-from-non-load-balanced-to-load-balanced-or-vice-versa","title":"Moving from non load balanced to load balanced or vice versa","text":"

It is possible to update a DNSPolicy that has no load balancing options set to one that has these options set and vice versa. Underneath, the DNS Operator will remove the existing records and replace them with the correct set of records based on your configuration. It is important however that when using DNSPolicy across multiple Gateways that share a hostname, the DNSPolicies targeting a listener with a shared hostname all use the same load balancing configuration (or absence thereof). It is invalid to have two DNSPolicies targeting a listener with a shared hostname that use different DNS strategies. Doing so will cause one of the DNSPolicies to fail to be enforced and report an error caused by an inability to bring the DNS records into a consistent state.

Example:

If you have gateway1 with listener example with a hostname of example.com and you have a separate gateway gateway2 with the same listener definition as gateway1 (perhaps on a different cluster in a different region), you should ensure that the DNSPolicies targeting these listeners are both using a loadbalanced configuration. Below is an example of valid and invalid configuration.

Valid Config

Given a gateway deployed on two different clusters in two different locations:

# example gateway\nkind: Gateway\napiVersion: gateway.networking.k8s.io/v1\nmetadata:\n  name: api-gateway\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - name: example\n      port: 80\n      hostname: 'api.example.com'\n      protocol: HTTP\n
# gateway 1\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway1\nspec:\n  loadBalancing:\n    weight: 130\n    geo: GEO-EU\n    defaultGeo: true\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n\n# gateway 2\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway2\nspec:\n  loadBalancing:\n    weight: 130\n    geo: US\n    defaultGeo: false\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n

Invalid Config

# gateway 1\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway1\nspec:\n  loadBalancing:\n    weight: 130\n    geo: GEO-EU\n    defaultGeo: true\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n\n# gateway 2\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway2\nspec: #notice no loadbalancing defined\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n
"},{"location":"kuadrant-operator/doc/user-guides/dns/orphan-dns-records/","title":"Orphan dns records","text":""},{"location":"kuadrant-operator/doc/user-guides/dns/orphan-dns-records/#orphan-dns-records","title":"Orphan DNS Records","text":"

This document is focused around multi-cluster DNS where you have more than one instance of a gateway that shares a common hostname with other gateways and assumes you have the observability stack set up.

"},{"location":"kuadrant-operator/doc/user-guides/dns/orphan-dns-records/#what-is-an-orphan-record","title":"What is an orphan record?","text":"

An orphan DNS record is a record or set of records that are owned by an instance of the DNS operator that no longer has a representation of those records on its cluster.

"},{"location":"kuadrant-operator/doc/user-guides/dns/orphan-dns-records/#how-do-orphan-records-occur","title":"How do orphan records occur?","text":"

Orphan records can occur when a DNSRecord resource (a resource that is created in response to a DNSPolicy) is deleted without allowing the owning controller time to clean up the associated records in the DNS provider. Generally in order for this to happen, you would need to force remove a finalizer from the DNSRecord resource, delete the kuadrant-system namespace directly or un-install kuadrant (delete the subscription if using OLM) without first cleaning up existing policies or delete a cluster entirely without first cleaning up the associated DNSPolicies. These are not common scenarios but when they do occur they can leave behind records in your DNS Provider which may point to IPs / Hosts that are no longer valid.

"},{"location":"kuadrant-operator/doc/user-guides/dns/orphan-dns-records/#how-do-you-spot-an-orphan-records-exist","title":"How do you spot an orphan record(s) exist?","text":"

There is a prometheus based alert that uses some metrics exposed from the DNS components to spot this situation. If you have installed the alerts for Kuadrant under the examples folder, you will see in the alerts tab an alert called PossibleOrphanedDNSRecords. When this is firing it means there are likely to be orphaned records in your provider.

"},{"location":"kuadrant-operator/doc/user-guides/dns/orphan-dns-records/#how-do-you-get-rid-of-an-orphan-record","title":"How do you get rid of an orphan record?","text":"

To remove an Orphan Record we must first identify the owner that is no longer aware of the record. To do this we need an existing DNSRecord in another cluster.

Example: You have 2 clusters that each have a gateway and share a host apps.example.com and have setup a DNSPolicy for each gateway. On cluster 1 you remove the kuadrant-system namespace without first cleaning up existing DNSPolicies targeting the gateway in your ingress-gateway namespace. Now there are a set of records that were being managed for that gateway that have not been removed. On cluster 2 the DNS Operator managing the existing DNSRecord in that cluster has a record of all owners of that dns name. In prometheus alerts, it spots that the number of owners does not correlate to the number of DNSRecord resources and triggers an alert. To remedy this rather than going to the DNS provider directly and trying to figure out which records to remove, you can instead follow the steps below.

Get the owner id of the DNSRecord on cluster 2 for the shared host

kubectl get dnsrecord somerecord -n my-gateway-ns -o=jsonpath='{.status.ownerID}'\n

Get all the owner ids

kubectl get dnsrecord.kuadrant.io somerecord -n my-gateway-ns -o=jsonpath='{.status.domainOwners}'\n\n# output\n# [\"26aacm1z\",\"49qn0wp7\"]\n

Create a placeholder DNSRecord with an inactive ownerID

For each owner id returned that isn't the owner id of the record that we want to remove records for, we need to create a dnsrecord resource and delete it. This will trigger the running operator in this cluster to clean up those records.

This is one of the owner IDs that is not present in the existing DNSRecord on this cluster

export ownerID=26aacm1z  \n\nexport rootHost=$(kubectl get dnsrecord.kuadrant.io somerecord -n  my-gateway-ns -o=jsonpath='{.spec.rootHost}')\n

Export a namespace with the aws credentials in it

export targetNS=kuadrant-system \n\nkubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n  name: delete-old-loadbalanced-dnsrecord\n  namespace: ${targetNS}\nspec:\n  providerRef:\n    name: my-aws-credentials\n  ownerID: ${ownerID}\n  rootHost: ${rootHost}\n  endpoints:\n\n    - dnsName: ${rootHost}\n      recordTTL: 60\n      recordType: CNAME\n      targets:\n        - klb.doesnt-exist.${rootHost}\nEOF\n

Delete the DNSrecord

kubectl delete dnsrecord.kuadrant.io delete-old-loadbalanced-dnsrecord -n ${targetNS} \n

Verification

We can verify that the steps worked correctly, by checking the DNSRecord again. Note it may take several minutes for the other record to update. We can force it by adding a label to the record

kubectl label dnsrecord.kuadrant.io somerecord test=test -n ${targetNS}\n\nkubectl get dnsrecord.kuadrant.io somerecord -n my-gateway-ns -o=jsonpath='{.status.domainOwners}'\n

You should also see your alert eventually stop triggering.

"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/","title":"Secure, protect, and connect APIs with Kuadrant","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#overview","title":"Overview","text":"

This guide walks you through using Kuadrant to secure, protect, and connect an API exposed by a Gateway (Kubernetes Gateway API) from the personas platform engineer and application developer. For more information on the different personas please see the Gateway API documentation

"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#prerequisites","title":"Prerequisites","text":"
  • Kubernetes cluster with Kuadrant operator installed.
  • kubectl command line tool.
  • AWS/Azure or GCP with DNS capabilities.
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-the-environment-variables","title":"Set the environment variables","text":"

Set the following environment variables used for convenience in this guide:

export KUADRANT_GATEWAY_NS=api-gateway # Namespace for the example Gateway\nexport KUADRANT_GATEWAY_NAME=external # Name for the example Gateway\nexport KUADRANT_DEVELOPER_NS=toystore # Namespace for an example toystore app\nexport KUADRANT_AWS_ACCESS_KEY_ID=xxxx # AWS Key ID with access to manage the DNS Zone ID below\nexport KUADRANT_AWS_SECRET_ACCESS_KEY=xxxx # AWS Secret Access Key with access to manage the DNS Zone ID below\nexport KUADRANT_AWS_DNS_PUBLIC_ZONE_ID=xxxx # AWS Route 53 Zone ID for the Gateway\nexport KUADRANT_ZONE_ROOT_DOMAIN=example.com # Root domain associated with the Zone ID above\nexport KUADRANT_CLUSTER_ISSUER_NAME=self-signed # Name for the ClusterIssuer\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-up-a-dns-provider","title":"Set up a DNS Provider","text":"

The DNS provider declares credentials to access the zone(s) that Kuadrant can use to set up DNS configuration. Ensure that this credential only has access to the zones you want Kuadrant to manage via DNSPolicy

Create the namespace the Gateway will be deployed in:

kubectl create ns ${KUADRANT_GATEWAY_NS}\n

Create the secret credentials in the same namespace as the Gateway - these will be used to configure DNS:

kubectl -n ${KUADRANT_GATEWAY_NS} create secret generic aws-credentials \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=$KUADRANT_AWS_ACCESS_KEY_ID \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=$KUADRANT_AWS_SECRET_ACCESS_KEY\n

Before adding a TLS issuer, create the secret credentials in the cert-manager namespace:

kubectl -n cert-manager create secret generic aws-credentials \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=$KUADRANT_AWS_ACCESS_KEY_ID \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=$KUADRANT_AWS_SECRET_ACCESS_KEY\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-the-toystore-app","title":"Deploy the Toystore app","text":"

Create the namespace for the Toystore application:

kubectl create ns ${KUADRANT_DEVELOPER_NS}\n

Deploy the Toystore app to the developer namespace:

kubectl apply -f https://raw.githubusercontent.com/Kuadrant/Kuadrant-operator/main/examples/toystore/toystore.yaml -n ${KUADRANT_DEVELOPER_NS}\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#add-a-tls-issuer","title":"Add a TLS issuer","text":"

To secure communication to the Gateways, define a TLS issuer for TLS certificates.

Note

This example uses Let's Encrypt, but you can use any issuer supported by cert-manager.

kubectl apply -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: ${KUADRANT_CLUSTER_ISSUER_NAME}\nspec:\n  selfSigned: {}\nEOF\n

Wait for the ClusterIssuer to become ready.

kubectl wait clusterissuer/${KUADRANT_CLUSTER_ISSUER_NAME} --for=condition=ready=true\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-a-gateway","title":"Deploy a Gateway","text":"
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}\n  namespace: ${KUADRANT_GATEWAY_NS}\n  labels:\n    kuadrant.io/gateway: \"true\"\nspec:\n    gatewayClassName: istio\n    listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All \n      hostname: \"api.${KUADRANT_ZONE_ROOT_DOMAIN}\"\n      name: api\n      port: 443\n      protocol: HTTPS\n      tls:\n        certificateRefs:\n        - group: \"\"\n          kind: Secret\n          name: api-${KUADRANT_GATEWAY_NAME}-tls\n        mode: Terminate\nEOF\n

Check the status of the Gateway ensuring the gateway is Accepted and Programmed:

kubectl get gateway ${KUADRANT_GATEWAY_NAME} -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Programmed\")].message}'\n

Check the status of the listener, you will see that it is not yet programmed or ready to accept traffic due to bad TLS configuration. This will be fixed in the next step with the TLSPolicy:

kubectl get gateway ${KUADRANT_GATEWAY_NAME} -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.listeners[0].conditions[?(@.type==\"Programmed\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#secure-and-protect-the-gateway-with-auth-rate-limit-and-dns-policies","title":"Secure and protect the Gateway with Auth, Rate Limit, and DNS policies.","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-the-gateway-tls-policy","title":"Deploy the gateway TLS policy","text":"
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-tls\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  targetRef:\n    name: ${KUADRANT_GATEWAY_NAME}\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  issuerRef:\n    group: cert-manager.io\n    kind: ClusterIssuer\n    name: ${KUADRANT_CLUSTER_ISSUER_NAME}\nEOF\n

Check that the TLSPolicy has an Accepted and Enforced status (This may take a few minutes for certain providers, e.g. Let's Encrypt):

kubectl get tlspolicy ${KUADRANT_GATEWAY_NAME}-tls -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#setup-toystore-application-httproute","title":"Setup Toystore application HTTPRoute","text":"
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\n  namespace: ${KUADRANT_DEVELOPER_NS}\n  labels:\n    deployment: toystore\n    service: toystore\nspec:\n  parentRefs:\n\n  - name: ${KUADRANT_GATEWAY_NAME}\n    namespace: ${KUADRANT_GATEWAY_NS}\n  hostnames:\n  - \"api.${KUADRANT_ZONE_ROOT_DOMAIN}\"\n  rules:\n  - matches:\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/cars\"\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/health\"    \n    backendRefs:\n    - name: toystore\n      port: 80  \nEOF\n

While the Gateway is now deployed, it currently has exposed endpoints. The next steps will be defining an AuthPolicy to set up a default 403 response for any unprotected endpoints, as well as a RateLimitPolicy to set up a default unrealistic low global limit to further protect any exposed endpoints.

"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-the-deny-all-gateway-authpolicy","title":"Set the Deny all Gateway AuthPolicy","text":"
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-auth\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: ${KUADRANT_GATEWAY_NAME}\n  defaults:\n   when:\n\n     - predicate: \"request.path != '/health'\"\n   rules:\n    authorization:\n      deny-all:\n        opa:\n          rego: \"allow = false\"\n    response:\n      unauthorized:\n        headers:\n          \"content-type\":\n            value: application/json\n        body:\n          value: |\n            {\n              \"error\": \"Forbidden\",\n              \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n            }\nEOF\n

Check that the AuthPolicy has Accepted and Enforced status:

kubectl get authpolicy ${KUADRANT_GATEWAY_NAME}-auth -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-the-low-limit-gateway-ratelimitpolicy","title":"Deploy the low-limit Gateway RateLimitPolicy","text":"
kubectl apply -f  - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-rlp\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: ${KUADRANT_GATEWAY_NAME}\n  defaults:\n    limits:\n      \"low-limit\":\n        rates:\n\n        - limit: 1\n          window: 10s\nEOF\n

Check that the RateLimitPolicy has Accepted and Enforced status:

kubectl get ratelimitpolicy ${KUADRANT_GATEWAY_NAME}-rlp -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#create-the-gateway-dnspolicy","title":"Create the Gateway DNSPolicy","text":"
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-dnspolicy\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  healthCheck:\n    failureThreshold: 3\n    interval: 1m\n    path: /health\n  loadBalancing:\n    defaultGeo: true\n    geo: GEO-NA\n    weight: 120\n  targetRef:\n    name: ${KUADRANT_GATEWAY_NAME}\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  providerRefs:\n\n  - name: aws-credentials # Secret created earlier\nEOF\n

Check that the DNSPolicy has been Accepted and Enforced (This may take a few minutes):

kubectl get dnspolicy ${KUADRANT_GATEWAY_NAME}-dnspolicy -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#dns-health-checks","title":"DNS Health checks","text":"

DNS health checks have been enabled on the DNSPolicy. These health checks will flag a published endpoint as healthy or unhealthy based on the defined configuration. When unhealthy, an endpoint will not be published if it has not already been published to the DNS provider; it will only be unpublished if it is part of a multi-value A record, and in all cases its health can be observed via the DNSPolicy status. For more information see DNS Health checks documentation

Check the status of the health checks as follows:

kubectl get dnspolicy ${KUADRANT_GATEWAY_NAME}-dnspolicy -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"SubResourcesHealthy\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#test-the-low-limit-and-deny-all-policies","title":"Test the low-limit and deny all policies","text":"
while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null  \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#override-the-gateways-deny-all-authpolicy","title":"Override the Gateway's deny-all AuthPolicy","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-up-api-key-auth-flow","title":"Set up API key auth flow","text":"

Set up an example API key for the new users:

export KUADRANT_SYSTEM_NS=$(kubectl get kuadrant -A -o jsonpath=\"{.items[0].metadata.namespace}\")\n
kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: bob-key\n  namespace: ${KUADRANT_SYSTEM_NS}\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: bob\nstringData:\n  api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: alice-key\n  namespace: ${KUADRANT_SYSTEM_NS}\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: alice\nstringData:\n  api_key: IAMALICE\ntype: Opaque\nEOF\n

Create a new AuthPolicy in a different namespace that overrides the Deny all created earlier:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-auth\n  namespace: ${KUADRANT_DEVELOPER_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  defaults:\n   when:\n\n     - predicate: \"request.path != '/health'\"  \n   rules:\n    authentication:\n      \"api-key-users\":\n        apiKey:\n          selector:\n            matchLabels:\n              app: toystore\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#override-low-limit-ratelimitpolicy-for-specific-users","title":"Override low-limit RateLimitPolicy for specific users","text":"

Create a new RateLimitPolicy in a different namespace to override the default RateLimitPolicy created earlier:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore-rlp\n  namespace: ${KUADRANT_DEVELOPER_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  limits:\n    \"general-user\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      counters:\n      - expression: auth.identity.userid\n      when:\n      - predicate: \"auth.identity.userid != 'bob'\"\n    \"bob-limit\":\n      rates:\n      - limit: 2\n        window: 10s\n      when:\n      - predicate: \"auth.identity.userid == 'bob'\"\nEOF\n

The RateLimitPolicy should be Accepted and Enforced:

kubectl get ratelimitpolicy -n ${KUADRANT_DEVELOPER_NS} toystore-rlp -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n

Check the status of the HTTPRoute, which is now affected by the RateLimitPolicy in the same namespace:

kubectl get httproute toystore -n ${KUADRANT_DEVELOPER_NS} -o=jsonpath='{.status.parents[0].conditions[?(@.type==\"kuadrant.io/RateLimitPolicyAffected\")].message}'\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#test-the-new-rate-limit-and-auth-policy","title":"Test the new Rate limit and Auth policy","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#send-requests-as-alice","title":"Send requests as Alice:","text":"

You should see status 200 every second for 5 seconds followed by status 429 every second for 5 seconds

while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#send-requests-as-bob","title":"Send requests as Bob:","text":"

You should see status 200 every second for 2 seconds followed by status 429 every second for 8 seconds

while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#next-steps","title":"Next Steps","text":"
  • mTLS Configuration To learn more about Kuadrant and see more how to guides, visit Kuadrant documentation
"},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#optional","title":"Optional","text":"

If you have prometheus in your cluster, set up a PodMonitor to configure it to scrape metrics directly from the Gateway pod. This must be done in the namespace where the Gateway is running. This configuration is required for metrics such as istio_requests_total.

kubectl apply -f - <<EOF\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: istio-proxies-monitor\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  selector:\n    matchExpressions:\n\n      - key: istio-prometheus-ignore\n        operator: DoesNotExist\n  podMetricsEndpoints:\n    - path: /stats/prometheus\n      interval: 30s\n      relabelings:\n        - action: keep\n          sourceLabels: [\"__meta_kubernetes_pod_container_name\"]\n          regex: \"istio-proxy\"\n        - action: keep\n          sourceLabels:\n            [\"__meta_kubernetes_pod_annotationpresent_prometheus_io_scrape\"]\n        - action: replace\n          regex: (\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})\n          replacement: \"[\\$2]:\\$1\"\n          sourceLabels:\n            [\n              \"__meta_kubernetes_pod_annotation_prometheus_io_port\",\n              \"__meta_kubernetes_pod_ip\",\n            ]\n          targetLabel: \"__address__\"\n        - action: replace\n          regex: (\\d+);((([0-9]+?)(\\.|$)){4})\n          replacement: \"\\$2:\\$1\"\n          sourceLabels:\n            [\n              \"__meta_kubernetes_pod_annotation_prometheus_io_port\",\n              \"__meta_kubernetes_pod_ip\",\n            ]\n          targetLabel: \"__address__\"\n        - action: labeldrop\n          regex: \"__meta_kubernetes_pod_label_(.+)\"\n        - sourceLabels: [\"__meta_kubernetes_namespace\"]\n          action: replace\n          targetLabel: namespace\n        - sourceLabels: [\"__meta_kubernetes_pod_name\"]\n          action: replace\n          targetLabel: pod_name\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/misc/external-api/","title":"Using Gateway API and Kuadrant with APIs outside of the cluster","text":""},{"location":"kuadrant-operator/doc/user-guides/misc/external-api/#overview","title":"Overview","text":"

In some cases, the application and API endpoints are exposed in a host external to the cluster where you are running Gateway API and Kuadrant, but you do not want it accessible directly via the public internet. If you want to have external traffic come into a Gateway API defined Gateway and protected by Kuadrant policies first, before being proxied to the existing legacy endpoints, this guide will give you some examples of how to achieve this.

"},{"location":"kuadrant-operator/doc/user-guides/misc/external-api/#what-we-will-do","title":"What we will do","text":"
  • Have an API in a private location become accessible via a public hostname
  • Setup a gateway and HTTPRoute to expose this private API via our new Gateway on a (public) domain.
  • proxy valid requests through to our back-end API service
  • Add auth and rate limiting and TLS to our public Gateway to protect it
"},{"location":"kuadrant-operator/doc/user-guides/misc/external-api/#pre-requisites","title":"Pre Requisites","text":"
  • Kuadrant and Gateway API installed (with Istio as the gateway provider)
  • Existing API on separate cluster accessible via HTTP from the Gateway cluster

What we want to achieve:

                                ------------------- DMZ -----------------|\n                                                                         |\n                               |-------------------------------- internal network -----------------------------------| \n                    load balancer                                        |                                            |           \n                        | - |  |      |----------k8s cluster-----------| |   |----- Legacy API Location --------|     |\n                        |   |  |      |  Gateway  Kuadrant             | |   |                                  |     |       \n                        |   |  |      |   -----    -----               | |   |                                  |     |                     \n---public traffic--my.api.com-------->|   |    |<--|   |               | |   |  HTTP (my.api.local)   Backend   |     |\n                        |   |  |      |   |    |   -----               | |   |      -----             -----     |     | \n                        |   |  |      |   ----- -----------proxy---(my.api.local)-->|   | ----------> |   |     |     | \n                        |   |  |      |                                | |   |      -----             -----     |     | \n                        | - |  |      |--------------------------------| |   |----------------------------------|     | \n                               |                                         |                                            |   \n                               |-----------------------------------------|--------------------------------------------| \n                                                                         |\n                                ------------------- DMZ -----------------|       \n

Note for all of the resources defined here there is a copy of them under the examples folder

1) Deploy a Gateway into the K8s cluster that will act as the main Ingress Gateway

Define your external API hostname and Internal API hostname

export EXTERNAL_HOST=my.api.com\nexport INTERNAL_HOST=my.api.local\n
kubectl apply -n gateway-system -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  labels:\n    istio: ingress\n  name: ingress\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - name: ingress-tls\n      port: 443\n      hostname: '${EXTERNAL_HOST}'\n      protocol: HTTPS\n      allowedRoutes:\n        namespaces:\n          from: All\n      tls:\n        mode: Terminate\n        certificateRefs:\n          - name: ingress-tls  #you can use TLSPolicy to provide this certificate or provide it manually\n            kind: Secret\nEOF            \n

2) Optional: Use TLSPolicy to configure TLS certificates for your listeners

TLSPolicy Guide

3) Optional: Use DNSPolicy to bring external traffic to the external hostname

DNSPolicy Guide

4) Ensure the Gateway has the status of Programmed set to True meaning it is ready.

kubectl get gateway ingress -n gateway-system -o=jsonpath='{.status.conditions[?(@.type==\"Programmed\")].status}'\n

5) Let Istio know about the external hostname and the rules it should use when sending traffic to that destination.

Create a ServiceEntry

kubectl apply -n gateway-system -f - <<EOF\napiVersion: networking.istio.io/v1beta1\nkind: ServiceEntry\nmetadata:\n  name: internal-api\nspec:\n  hosts:\n\n    - ${INTERNAL_HOST} # your internal http endpoint\n  location: MESH_EXTERNAL\n  resolution: DNS\n  ports:\n    - number: 80\n      name: http\n      protocol: HTTP\n    - number: 443\n      name: https\n      protocol: TLS\nEOF\n

Create a DestinationRule to configure how to handle traffic to this endpoint.

kubectl apply -n gateway-system -f - <<EOF\napiVersion: networking.istio.io/v1\nkind: DestinationRule\nmetadata:\n  name: internal-api\nspec:\n  host: ${INTERNAL_HOST}\n  trafficPolicy:\n    tls:\n      mode: SIMPLE\n      sni: ${INTERNAL_HOST}\nEOF\n

6) Create a HTTPRoute that will route traffic for the Gateway and re-write the host

kubectl apply -n gateway-system -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: external-host\nspec:\n  parentRefs:\n\n    - name: ingress\n  hostnames:\n    - ${EXTERNAL_HOST}\n  rules:\n    - backendRefs:\n        - name: ${INTERNAL_HOST}\n          kind: Hostname\n          group: networking.istio.io\n          port: 443\n      filters:\n        - type: URLRewrite\n          urlRewrite:\n            hostname: ${INTERNAL_HOST}\nEOF\n

We should now be able to send requests to our external host and have the Gateway proxy requests and responses to and from the internal host.

7) (optional) Add Auth and RateLimiting to protect your public endpoint

As we are using Gateway API to define the Gateway and HTTPRoutes, we can now also apply RateLimiting and Auth to protect our public endpoints

AuthPolicy Guide

RateLimiting Guide

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/","title":"Authenticated Rate Limiting for Application developers","text":"

For more info on the different personas see Gateway API

This user guide walks you through an example of how to configure authenticated rate limiting for an application using Kuadrant.

Authenticated rate limiting rate limits the traffic directed to an application based on attributes of the client user, who is authenticated by some authentication method. A few examples of authenticated rate limiting use cases are:

  • User A can send up to 50rps (\"requests per second\"), while User B can send up to 100rps.
  • Each user can send up to 20rpm (\"requests per minute\").
  • Admin users (members of the 'admin' group) can send up to 100rps, while regular users (non-admins) can send up to 20rpm and no more than 5rps.

In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API exposes an endpoint at GET http://api.toystore.com/toy, to mimic an operation of reading toy records.

We will define 2 users of the API, which can send requests to the API at different rates, based on their user IDs. The authentication method used is API key.

User ID Rate limit alice 5rp10s (\"5 requests every 10 seconds\") bob 2rp10s (\"2 requests every 10 seconds\")"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#deploy-the-toy-store-api","title":"Deploy the Toy Store API","text":"

Create the deployment:

kubectl apply -f examples/toystore/toystore.yaml\n

Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:

kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches:\n    - path:\n        type: Exact\n        value: \"/toy\"\n      method: GET\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

Export the gateway hostname and port:

export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n

Verify the route works:

curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n

Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#enforce-authentication-on-requests-to-the-toy-store-api","title":"Enforce authentication on requests to the Toy Store API","text":"

Create a Kuadrant AuthPolicy to configure the authentication:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  rules:\n    authentication:\n      \"api-key-users\":\n        apiKey:\n          selector:\n            matchLabels:\n              app: toystore\n          allNamespaces: true\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n

Verify the authentication works by sending a request to the Toy Store API without API key:

curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-users\"\n# x-ext-auth-reason: \"credential not found\"\n

Create API keys for users alice and bob to authenticate:

Note: Kuadrant stores API keys as Kubernetes Secret resources. User metadata can be stored in the annotations of the resource.

kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: bob-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: bob\nstringData:\n  api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: alice-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: alice\nstringData:\n  api_key: IAMALICE\ntype: Opaque\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#enforce-authenticated-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce authenticated rate limiting on requests to the Toy Store API","text":"

Create a Kuadrant RateLimitPolicy to configure rate limiting:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  limits:\n    \"alice-limit\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      when:\n      - predicate: \"auth.identity.userid == 'alice'\"\n    \"bob-limit\":\n      rates:\n      - limit: 2\n        window: 10s\n      when:\n      - predicate: \"auth.identity.userid == 'bob'\"\nEOF\n

Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

Verify the rate limiting works by sending requests as Alice and Bob.

Up to 5 successful (200 OK) requests every 10 seconds allowed for Alice, then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

Up to 2 successful (200 OK) requests every 10 seconds allowed for Bob, then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/","title":"Authenticated Rate Limiting with JWTs and Kubernetes RBAC","text":"

This user guide walks you through an example of how to use Kuadrant to protect an application with policies to enforce:

  • authentication based on OpenID Connect (OIDC) ID tokens (signed JWTs), issued by a Keycloak server;
  • alternative authentication method by Kubernetes Service Account tokens;
  • authorization delegated to Kubernetes RBAC system;
  • rate limiting by user ID.

In this example, we will protect a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request.

The API listens to requests at the hostnames *.toystore.com, where it exposes the endpoints GET /toy*, POST /admin/toy and DELETE /admin/toy, respectively, to mimic operations of reading, creating, and deleting toy records.

Any authenticated user/service account can send requests to the Toy Store API, by providing either a valid Keycloak-issued access token or Kubernetes token.

Privileges to execute the requested operation (read, create or delete) will be granted according to the following RBAC rules, stored in the Kubernetes authorization system:

Operation Endpoint Required role Read GET /toy* toystore-reader Create POST /admin/toy toystore-writer Delete DELETE /admin/toy toystore-writer

Each user will be entitled to a maximum of 5rp10s (5 requests every 10 seconds).

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#deploy-the-toystore-example-api","title":"Deploy the Toystore example API:","text":"
kubectl apply -f examples/toystore/toystore.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#api-lifecycle","title":"API lifecycle","text":""},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-unprotected","title":"Try the API unprotected","text":"

Export the gateway hostname and port:

export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n

It should return 200 OK.

Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#deploy-keycloak","title":"Deploy Keycloak","text":"

Create the namespace:

kubectl create namespace keycloak\n

Deploy Keycloak with a bootstrap realm, users, and clients:

kubectl apply -n keycloak -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

Note: The Keycloak server may take a couple of minutes to be ready.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#enforce-authentication-and-authorization-for-the-toy-store-api","title":"Enforce authentication and authorization for the Toy Store API","text":"

Create a Kuadrant AuthPolicy to configure authentication and authorization:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-protection\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  rules:\n    authentication:\n      \"keycloak-users\":\n        jwt:\n          issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n      \"k8s-service-accounts\":\n        kubernetesTokenReview:\n          audiences:\n\n          - https://kubernetes.default.svc.cluster.local\n        overrides:\n          \"sub\":\n            selector: auth.identity.user.username\n    authorization:\n      \"k8s-rbac\":\n        kubernetesSubjectAccessReview:\n          user:\n            selector: auth.identity.sub\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.sub\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-missing-authentication","title":"Try the API missing authentication","text":"
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-users\"\n# www-authenticate: Bearer realm=\"k8s-service-accounts\"\n# x-ext-auth-reason: {\"k8s-service-accounts\":\"credential not found\",\"keycloak-users\":\"credential not found\"}\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-without-permission","title":"Try the API without permission","text":"

Obtain an access token with the Keycloak server:

ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

Send a request to the API as the Keycloak-authenticated user while still missing permissions:

curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n

Create a Kubernetes Service Account to represent a consumer of the API associated with the alternative source of identities k8s-service-accounts:

kubectl apply -f - <<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: client-app-1\nEOF\n

Obtain an access token for the client-app-1 service account:

SA_TOKEN=$(kubectl create token client-app-1)\n

Send a request to the API as the service account while still missing permissions:

curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#grant-access-to-the-toy-store-api-for-user-and-service-account","title":"Grant access to the Toy Store API for user and service account","text":"

Create the toystore-reader and toystore-writer roles:

kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: toystore-reader\nrules:\n\n- nonResourceURLs: [\"/toy*\"]\n  verbs: [\"get\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: toystore-writer\nrules:\n- nonResourceURLs: [\"/admin/toy\"]\n  verbs: [\"post\", \"delete\"]\nEOF\n

Add permissions to the user and service account:

User Kind Roles john User registered in Keycloak toystore-reader, toystore-writer client-app-1 Kubernetes Service Account toystore-reader
kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: toystore-readers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: toystore-reader\nsubjects:\n\n- kind: User\n  name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\n- kind: ServiceAccount\n  name: client-app-1\n  namespace: default\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: toystore-writers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: toystore-writer\nsubjects:\n- kind: User\n  name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\nEOF\n
Q: Can I use Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings?

Yes, you can.

The example above is for non-resource URL Kubernetes roles. For using Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings, thus more flexible resource-based permissions to protect the API, see the spec for Kubernetes SubjectAccessReview authorization in the Authorino docs.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-with-permission","title":"Try the API with permission","text":"

Send requests to the API as the Keycloak-authenticated user:

curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 200 OK\n

Send requests to the API as the Kubernetes service account:

curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 403 Forbidden\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce rate limiting on requests to the Toy Store API","text":"

Create a Kuadrant RateLimitPolicy to configure rate limiting:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  limits:\n    \"per-user\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      counters:\n      - expression: auth.identity.userid\nEOF\n

Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-rate-limited","title":"Try the API rate limited","text":"

Each user should be entitled to a maximum of 5 requests every 10 seconds.

Note: If the tokens have expired, you may need to refresh them first.

Send requests as the Keycloak-authenticated user:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

Send requests as the Kubernetes service account:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/","title":"Gateway Rate Limiting for Cluster Operators","text":"

For more info on the different personas see Gateway API

This user guide walks you through an example of how to configure rate limiting for all routes attached to a specific ingress gateway.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#deploy-the-toystore-example-api","title":"Deploy the Toystore example API:","text":"
kubectl apply -f examples/toystore/toystore.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#create-the-ingress-gateways","title":"Create the ingress gateways","text":"
kubectl -n gateway-system apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: external\n  annotations:\n    kuadrant.io/namespace: kuadrant-system\n    networking.istio.io/service-type: ClusterIP\nspec:\n  gatewayClassName: istio\n  listeners:\n\n  - name: external\n    port: 80\n    protocol: HTTP\n    hostname: '*.io'\n    allowedRoutes:\n      namespaces:\n        from: All\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: internal\n  annotations:\n    kuadrant.io/namespace: kuadrant-system\n    networking.istio.io/service-type: ClusterIP\nspec:\n  gatewayClassName: istio\n  listeners:\n  - name: local\n    port: 80\n    protocol: HTTP\n    hostname: '*.local'\n    allowedRoutes:\n      namespaces:\n        from: All\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#enforce-rate-limiting-on-requests-incoming-through-the-external-gateway","title":"Enforce rate limiting on requests incoming through the external gateway","text":"
    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n    \u2502 (Gateway) \u2502      \u2502 (Gateway) \u2502\n    \u2502  external \u2502      \u2502  internal \u2502\n    \u2502           \u2502      \u2502           \u2502\n    \u2502   *.io    \u2502      \u2502  *.local  \u2502\n    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n          \u25b2\n          \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (RateLimitPolicy) \u2502\n\u2502       gw-rlp      \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

Create a Kuadrant RateLimitPolicy to configure rate limiting:

kubectl apply -n gateway-system -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: gw-rlp\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external\n  limits:\n    \"global\":\n      rates:\n\n      - limit: 5\n        window: 10s\nEOF\n

Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#deploy-a-sample-api-to-test-rate-limiting-enforced-at-the-level-of-the-gateway","title":"Deploy a sample API to test rate limiting enforced at the level of the gateway","text":"
                           \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u2502 (Gateway) \u2502      \u2502 (Gateway) \u2502\n\u2502 (RateLimitPolicy) \u2502      \u2502  external \u2502      \u2502  internal \u2502\n\u2502       gw-rlp      \u251c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502           \u2502      \u2502           \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2502   *.io    \u2502      \u2502  *.local  \u2502\n                           \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518      \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518\n                                 \u2502                  \u2502\n                                 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                           \u2502\n                                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                                 \u2502   (HTTPRoute)    \u2502\n                                 \u2502     toystore     \u2502\n                                 \u2502                  \u2502\n                                 \u2502 *.toystore.io    \u2502\n                                 \u2502 *.toystore.local \u2502\n                                 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                          \u2502\n                                   
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                                   \u2502   (Service)  \u2502\n                                   \u2502   toystore   \u2502\n                                   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#route-traffic-to-the-api-from-both-gateways","title":"Route traffic to the API from both gateways:","text":"
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: external\n    namespace: gateway-system\n  - name: internal\n    namespace: gateway-system\n  hostnames:\n  - \"*.toystore.io\"\n  - \"*.toystore.local\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#verify-the-rate-limiting-works-by-sending-requests-in-a-loop","title":"Verify the rate limiting works by sending requests in a loop","text":"

Expose the gateways, respectively at the port numbers 9081 and 9082 of the local host:

kubectl port-forward -n gateway-system service/external-istio 9081:80 >/dev/null 2>&1 &\nkubectl port-forward -n gateway-system service/internal-istio 9082:80 >/dev/null 2>&1 &\n

Up to 5 successful (200 OK) requests every 10 seconds through the external ingress gateway (*.io), then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.io' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

Unlimited successful (200 OK) through the internal ingress gateway (*.local):

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.local' http://localhost:9082 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/","title":"Gateway Rate Limiting","text":"

This user guide walks you through an example of how to configure multiple rate limit polices for different listeners in an ingress gateway.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#deploy-the-sample-api","title":"Deploy the sample API:","text":"
kubectl apply -f examples/toystore/toystore.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#create-the-ingress-gateways","title":"Create the ingress gateways","text":"
kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: environment\n  annotations:\n    kuadrant.io/namespace: kuadrant-system\n    networking.istio.io/service-type: ClusterIP\nspec:\n  gatewayClassName: istio\n  listeners:\n\n  - name: external\n    port: 80\n    protocol: HTTP\n    hostname: '*.io'\n    allowedRoutes:\n      namespaces:\n        from: All\n  - name: local\n    port: 80\n    protocol: HTTP\n    hostname: '*.local'\n    allowedRoutes:\n      namespaces:\n        from: All\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#route-traffic-to-the-api-from-both-gateways-listeners","title":"Route traffic to the API from both gateways listeners","text":"
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: environment\n    namespace: kuadrant-system\n  hostnames:\n  - \"*.toystore.io\"\n  - \"*.toystore.local\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#create-a-kuadrant-ratelimitpolicy-to-configure-rate-limiting-for-the-external-listener","title":"Create a Kuadrant RateLimitPolicy to configure rate limiting for the external listener:","text":"
kubectl apply -n kuadrant-system -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: gw-rlp-external\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: environment\n    sectionName: external\n  defaults:\n    strategy: merge\n    limits:\n      \"external\":\n        rates:\n\n        - limit: 2\n          window: 10s\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#create-a-kuadrant-ratelimitpolicy-to-configure-rate-limiting-for-the-local-listener","title":"Create a Kuadrant RateLimitPolicy to configure rate limiting for the local listener:","text":"
kubectl apply -n kuadrant-system -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: gw-rlp-local\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: environment\n    sectionName: local\n  defaults:\n    strategy: merge\n    limits:\n      \"local\":\n        rates:\n\n        - limit: 5\n          window: 10s\nEOF\n

Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#verify-the-rate-limiting-works-by-sending-requests-in-a-loop","title":"Verify the rate limiting works by sending requests in a loop","text":"

Expose the gateway at port number 9081 of the local host:

kubectl port-forward -n gateway-system service/environment-istio 9081:80 >/dev/null 2>&1 &\n

Up to 2 successful (200 OK) requests every 10 seconds through the external listener (*.io), then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.io' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

Up to 5 successful (200 OK) requests every 10 seconds through the local listener (*.local), then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.local' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/","title":"Multi authenticated Rate Limiting for an Application","text":"

This user guide walks you through an example of how to configure multiple authenticated rate limiting for an application using Kuadrant.

Authenticated rate limiting, rate limits the traffic directed to an application based on attributes of the client user, who is authenticated by some authentication method. A few examples of authenticated rate limiting use cases are:

  • User A can send up to 50rps (\"requests per second\"), while User B can send up to 100rps.
  • Each user can send up to 20rpm (\"request per minute\").
  • Admin users (members of the 'admin' group) can send up to 100rps, while regular users (non-admins) can send up to 20rpm and no more than 5rps.

In this guide, we will rate limit a sample REST API called Toy Store, an echo service that echoes back to the user whatever attributes it gets in the request. The API exposes an endpoint at GET http://api.toystore.com/toy, to mimic an operation of reading toy records.

We will define 2 users of the API, which can send requests to the API at different rates, based on their user IDs. The authentication method used is API key.

User ID Rate limit alice 5rp10s (\"5 requests every 10 seconds\") bob 2rp10s (\"2 requests every 10 seconds\")"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#deploy-the-toy-store-api","title":"Deploy the Toy Store API","text":"
kubectl apply -f examples/toystore/toystore.yaml\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#create-a-httproute-to-route-traffic-to-the-service-via-istio-ingress-gateway","title":"Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:","text":"
kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches:\n    - path:\n        type: Exact\n        value: \"/toy\"\n      method: GET\n    - path:\n        type: Exact\n        value: \"/car\"\n      method: GET\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#export-the-gateway-hostname-and-port","title":"Export the gateway hostname and port:","text":"
export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#verify-the-route-works","title":"Verify the route works:","text":"
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n

Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#enforce-authentication-on-requests-to-the-toy-store-api","title":"Enforce authentication on requests to the Toy Store API","text":"

Create a Kuadrant AuthPolicy to configure the authentication:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: AuthPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  rules:\n    authentication:\n      \"api-key-users\":\n        apiKey:\n          selector:\n            matchLabels:\n              app: toystore\n          allNamespaces: true\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#verify-the-authentication-works-by-sending-a-request-to-the-toy-store-api-without-api-key","title":"Verify the authentication works by sending a request to the Toy Store API without API key:","text":"
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-users\"\n# x-ext-auth-reason: \"credential not found\"\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#create-api-keys-for-users-alice-and-bob-to-authenticate","title":"Create API keys for users alice and bob to authenticate:","text":"

Note: Kuadrant stores API keys as Kubernetes Secret resources. User metadata can be stored in the annotations of the resource.

kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: bob-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: bob\nstringData:\n  api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: alice-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: alice\nstringData:\n  api_key: IAMALICE\ntype: Opaque\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#enforce-authenticated-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce authenticated rate limiting on requests to the Toy Store API","text":"

Create Kuadrant RateLimitPolicies to configure rate limiting for Bob and Alice:

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#bob","title":"Bob","text":"
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-1\n  defaults:\n    strategy: merge\n    limits:\n      \"bob-limit\":\n        rates:\n\n        - limit: 2\n          window: 10s\n        when:\n        - predicate: \"auth.identity.userid == 'bob'\"\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#alice","title":"Alice","text":"
kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: toystore-alice\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-1\n  defaults:\n    strategy: merge\n    limits:\n      \"alice-limit\":\n        rates:\n\n        - limit: 5\n          window: 10s\n        when:\n        - predicate: \"auth.identity.userid == 'alice'\"\nEOF\n

Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

Verify the rate limiting works by sending requests as Alice and Bob.

Up to 5 successful (200 OK) requests every 10 seconds allowed for Alice, then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

Up to 2 successful (200 OK) requests every 10 seconds allowed for Bob, then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/","title":"Simple Rate Limiting for Application developers","text":"

For more info on the different personas see Gateway API

This user guide walks you through an example of how to configure rate limiting for an endpoint of an application using Kuadrant.

In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API listens to requests at the hostname api.toystore.com, where it exposes the endpoints GET /toys* and POST /toys, respectively, to mimic operations of reading and writing toy records.

We will rate limit the POST /toys endpoint to a maximum of 5rp10s (\"5 requests every 10 seconds\").

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#setup-the-environment","title":"Setup the environment","text":"

Follow this setup doc to set up your environment before continuing with this doc.

"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#deploy-the-toy-store-api","title":"Deploy the Toy Store API","text":"

Create the deployment:

kubectl apply -f examples/toystore/toystore.yaml\n

Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:

kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches:\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/toys\"\n    backendRefs:\n    - name: toystore\n      port: 80\n  - matches: # it has to be a separate HTTPRouteRule so we do not rate limit other endpoints\n    - method: POST\n      path:\n        type: Exact\n        value: \"/toys\"\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

Export the gateway hostname and port:

export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n

Verify the route works:

curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n

Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce rate limiting on requests to the Toy Store API","text":"

Create a Kuadrant RateLimitPolicy to configure rate limiting:

kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-2\n  limits:\n    \"create-toy\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      when:\n      - predicate: \"request.method == 'POST'\"\nEOF\n

Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

Verify the rate limiting works by sending requests in a loop.

Up to 5 successful (200 OK) requests every 10 seconds to POST /toys, then 429 Too Many Requests:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -X POST | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

Unlimited successful (200 OK) to GET /toys:

while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/","title":"Gateway TLS for Cluster Operators","text":"

This user guide walks you through an example of how to configure TLS for all routes attached to an ingress gateway.

"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#requisites","title":"Requisites","text":"
  • Docker
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#setup","title":"Setup","text":"

This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API, CertManager and Kuadrant itself.

Clone the project:

git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n

Setup the environment:

make local-setup\n

Create a namespace:

kubectl create namespace my-gateways\n
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"

Create a gateway:

kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: prod-web\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All\n      name: api\n      hostname: \"*.toystore.local\"\n      port: 443\n      protocol: HTTPS\n      tls:\n        mode: Terminate\n        certificateRefs:\n          - name: toystore-local-tls\n            kind: Secret\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#enable-tls-on-the-gateway","title":"Enable TLS on the gateway","text":"

The TLSPolicy requires a reference to an existing CertManager Issuer.

Create a CertManager Issuer:

kubectl apply -n my-gateways -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: selfsigned-issuer\nspec:\n  selfSigned: {}\nEOF\n

Note: We are using a self-signed issuer here but any supported CertManager issuer or cluster issuer can be used.

kubectl get issuer selfsigned-issuer -n my-gateways\n

Response:

NAME                        READY   AGE\nselfsigned-issuer   True    18s\n

Create a Kuadrant TLSPolicy to configure TLS:

kubectl apply -n my-gateways -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: prod-web\nspec:\n  targetRef:\n    name: prod-web\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  issuerRef:\n    group: cert-manager.io\n    kind: Issuer\n    name: selfsigned-issuer\nEOF\n

Check policy status:

kubectl get tlspolicy -o wide -n my-gateways\n

Response:

NAME       STATUS     TARGETREFKIND   TARGETREFNAME   AGE\nprod-web   Accepted   Gateway         prod-web        13s\n

Check a Certificate resource was created:

kubectl get certificates -n my-gateways\n

Response

NAME                 READY   SECRET               AGE\ntoystore-local-tls   True    toystore-local-tls   7m30s\n

Check a TLS Secret resource was created:

kubectl get secrets -n my-gateways --field-selector=\"type=kubernetes.io/tls\"\n

Response:

NAME                 TYPE                DATA   AGE\ntoystore-local-tls   kubernetes.io/tls   3      7m42s\n
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#deploy-a-sample-api-to-test-tls","title":"Deploy a sample API to test TLS","text":"

Deploy the sample API:

kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n

Route traffic to the API from our gateway:

kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: prod-web\n    namespace: my-gateways\n  hostnames:\n  - \"*.toystore.local\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#verify-tls-works-by-sending-requests","title":"Verify TLS works by sending requests","text":"

Get the gateway address:

GWADDRESS=`kubectl get gateway/prod-web -n my-gateways -o=jsonpath='{.status.addresses[?(@.type==\"IPAddress\")].value}'`\necho $GWADDRESS\n

Response:

172.18.200.1\n

Verify we can access the service via TLS:

curl -vkI https://api.toystore.local --resolve \"api.toystore.local:443:$GWADDRESS\"\n

Response:

* Added api.toystore.local:443:172.18.200.1 to DNS cache\n* Hostname api.toystore.local was found in DNS cache\n*   Trying 172.18.200.1:443...\n* Connected to api.toystore.local (172.18.200.1) port 443 (#0)\n* ALPN: offers h2\n* ALPN: offers http/1.1\n* TLSv1.0 (OUT), TLS header, Certificate Status (22):\n* TLSv1.3 (OUT), TLS handshake, Client hello (1):\n* TLSv1.2 (IN), TLS header, Certificate Status (22):\n* TLSv1.3 (IN), TLS handshake, Server hello (2):\n* TLSv1.2 (IN), TLS header, Finished (20):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):\n* TLSv1.3 (IN), TLS handshake, Certificate (11):\n* TLSv1.3 (IN), TLS handshake, CERT verify (15):\n* TLSv1.3 (IN), TLS handshake, Finished (20):\n* TLSv1.2 (OUT), TLS header, Finished (20):\n* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.3 (OUT), TLS handshake, Finished (20):\n* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384\n* ALPN: server accepted h2\n* Server certificate:\n*  subject: [NONE]\n*  start date: Feb 15 11:46:50 2024 GMT\n*  expire date: May 15 11:46:50 2024 GMT\n* Using HTTP2, server supports multiplexing\n* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* h2h3 [:method: HEAD]\n* h2h3 [:path: /]\n* h2h3 [:scheme: https]\n* h2h3 [:authority: api.toystore.local]\n* h2h3 [user-agent: curl/7.85.0]\n* h2h3 [accept: */*]\n* Using Stream ID: 1 (easy handle 0x5623e4fe5bf0)\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n> HEAD / HTTP/2\n> Host: api.toystore.local\n> user-agent: curl/7.85.0\n> accept: */*\n>\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* 
old SSL session ID is stale, removing\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* Connection state changed (MAX_CONCURRENT_STREAMS == 2147483647)!\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n< HTTP/2 200\nHTTP/2 200\n< content-type: application/json\ncontent-type: application/json\n< server: istio-envoy\nserver: istio-envoy\n< date: Thu, 15 Feb 2024 12:13:27 GMT\ndate: Thu, 15 Feb 2024 12:13:27 GMT\n< content-length: 1658\ncontent-length: 1658\n< x-envoy-upstream-service-time: 1\nx-envoy-upstream-service-time: 1\n\n<\n\n* Connection #0 to host api.toystore.local left intact\n
"},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#cleanup","title":"Cleanup","text":"
make local-cleanup\n
"},{"location":"kuadrant-operator/examples/alerts/","title":"Index","text":""},{"location":"kuadrant-operator/examples/alerts/#slo-multi-burn-rate-multi-window-alerts","title":"SLO Multi burn rate multi window alerts","text":"

Kuadrant has created two example SLO alerts to help give ideas on the types of SLO alerts that could be used with the operator. We have created one alert for latency and one for availability; both are Multiwindow, Multi-Burn-Rate alerts. The alerts show a scenario where a 28d rolling window is used and an uptime of 99.95% (i.e. only a 0.05% error budget margin) is desired. In real-world time this would be downtime of around:

Time Frame Duration Daily: 43s Weekly: 5m 2.4s Monthly: 21m 44s Quarterly: 1h 5m 12s Yearly: 4h 20m 49s

These values can be changed to suit different scenarios

"},{"location":"kuadrant-operator/examples/alerts/#sloth","title":"Sloth","text":"

Sloth is a tool to aid in the creation of multi burn rate and multi window SLO alerts and was used to create both the availability and latency alerts. It follows the common standard set out by Google's SRE book. Sloth generates alerts based on specific specs given. The specs for our example alerts can be found in the examples/alerts/sloth folder.

"},{"location":"kuadrant-operator/examples/alerts/#metrics-used-for-the-alerts","title":"Metrics used for the alerts","text":""},{"location":"kuadrant-operator/examples/alerts/#availability","title":"Availability","text":"

For the availability SLO alerts the Istio metric istio_requests_total was used, as it's a counter-type metric (meaning its values can only increase) and it gives information on all requests handled by the Istio proxy.

"},{"location":"kuadrant-operator/examples/alerts/#latency","title":"Latency","text":"

For the latency SLO alerts the Istio metric istio_request_duration_milliseconds was used, as it's a Histogram.

"},{"location":"kuadrant-operator/examples/alerts/#sloth-generation","title":"Sloth generation","text":"

You can modify the example Sloth specs we have and regenerate the Prometheus rules using the Sloth CLI and the generate command. For more information please see the Sloth website.

sloth generate -i examples/alerts/sloth/latency.yaml --default-slo-period=28d\n
You can also use the make target to generate the rules too.

make sloth-generate\n
"},{"location":"kuadrant-operator/examples/alerts/#prometheus-unit-tests","title":"Prometheus unit tests","text":"

There are also two matching unit tests to verify and test the alerts that Sloth has generated. These can be run using the make target:

make test-alerts\n

Note: The prometheus unit tests will also run via Github actions when a change is made in the alerts file.

"},{"location":"authorino/","title":"Authorino","text":"

Kubernetes-native authorization service for tailor-made Zero Trust API security.

A lightweight Envoy external authorization server fully manageable via Kubernetes Custom Resources. JWT authentication, API key, mTLS, pattern-matching authz, OPA, K8s SA tokens, K8s RBAC, external metadata fetching, and more, with minimum to no coding at all, no rebuilding of your applications.

Authorino is not about inventing anything new. It's about making the best things about auth out there easy and simple to use. Authorino is multi-tenant, it's cloud-native and it's open source.

"},{"location":"authorino/#getting-started","title":"Getting started","text":"
  1. Deploy with the Authorino Operator
  2. Setup Envoy proxy and the external authorization filter
  3. Apply an Authorino AuthConfig custom resource
  4. Obtain an authentication token and start sending requests

The full Getting started page of the docs provides details for the steps above, as well as information about requirements and next steps.

Or try out our Hello World example.

For general information about protecting your service using Authorino, check out the docs.

"},{"location":"authorino/#use-cases","title":"Use-cases","text":"

The User guides section of the docs gathers several AuthN/AuthZ use-cases as well as the instructions to implement them using Authorino. A few examples are:

  • Authentication with JWTs and OpenID Connect Discovery
  • Authentication with API keys
  • Authentication with Kubernetes SA tokens (TokenReview API)
  • Authentication with X.509 certificates and mTLS
  • Authorization with JSON pattern-matching rules (e.g. JWT claims, request attributes, etc)
  • Authorization with Open Policy Agent (OPA) Rego policies
  • Authorization using the Kubernetes RBAC (rules stated in K8s Role and RoleBinding resources)
  • Authorization using auth metadata fetched from external sources
  • OIDC authentication and RBAC with Keycloak JWTs
  • Injecting auth data into the request (HTTP headers, Wristband tokens, rate-limit metadata, etc)
  • Authorino for the Kubernetes control plane (aka Authorino as ValidatingWebhook service)
"},{"location":"authorino/#how-it-works","title":"How it works","text":"

Authorino enables hybrid API security, with usually no code changes required to your application, tailor-made for your own combination of authentication standards and protocols and authorization policies of choice.

Authorino implements Envoy Proxy's external authorization gRPC protocol, and is a part of Red Hat Kuadrant architecture.

Under the hood, Authorino is based on Kubernetes Custom Resource Definitions and the Operator pattern.

Bootstrap and configuration:

  1. Deploy the service/API to be protected (\"Upstream\"), Authorino and Envoy
  2. Write and apply an Authorino AuthConfig Custom Resource associated to the public host of the service

Request-time:

  1. A user or service account (\"Consumer\") obtains an access token to consume resources of the Upstream service, and sends a request to the Envoy ingress endpoint
  2. The Envoy proxy establishes fast gRPC connection with Authorino carrying data of the HTTP request (context info), which causes Authorino to lookup for an AuthConfig Custom Resource to enforce (pre-cached)
  3. Identity verification (authentication) phase - Authorino verifies the identity of the consumer, where at least one authentication method/identity provider must go through
  4. External metadata phase - Authorino fetches additional metadata for the authorization from external sources (optional)
  5. Policy enforcement (authorization) phase - Authorino takes as input a JSON composed out of context data, resolved identity object and fetched additional metadata from previous phases, and triggers the evaluation of user-defined authorization policies
  6. Response (metadata-out) phase \u2013 Authorino builds user-defined custom responses (dynamic JSON objects and/or Festival Wristband OIDC tokens), to be supplied back to the client and/or upstream service within added HTTP headers or as Envoy Dynamic Metadata (optional)
  7. Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints (optional)
  8. Authorino and Envoy settle the authorization protocol with either OK/NOK response
  9. If authorized, Envoy triggers other HTTP filters in the chain (if any), pre-injecting eventual dynamic metadata returned by Authorino, and ultimately redirects the request to the Upstream
  10. The Upstream serves the requested resource to the consumer
More

The Architecture section of the docs covers details of protecting your APIs with Envoy and Authorino, including information about topology (centralized gateway, centralized authorization service or sidecars), deployment modes (cluster-wide reconciliation vs. namespaced instances), a specification of Authorino's AuthConfig Custom Resource Definition (CRD) and more.

You will also find in that section information about what happens in request-time (aka Authorino's Auth Pipeline) and how to leverage the Authorization JSON for writing policies, dynamic responses and other features of Authorino.

"},{"location":"authorino/#list-of-features","title":"List of features","text":"Feature Stage Identity verification & authentication JOSE/JWT validation (OpenID Connect) Ready OAuth 2.0 Token Introspection (opaque tokens) Ready Kubernetes TokenReview (SA tokens) Ready OpenShift User-echo endpoint In analysis API key authentication Ready mTLS authentication Ready HMAC authentication Planned (#9) Plain (resolved beforehand and injected in the payload) Ready Anonymous access Ready Ad hoc external metadata fetching OpenID Connect User Info Ready UMA-protected resource attributes Ready HTTP GET/GET-by-POST Ready Policy enforcement/authorization JSON pattern matching (e.g. JWT claims, request attributes checking) Ready OPA/Rego policies (inline and pull from registry) Ready Kubernetes SubjectAccessReview (resource and non-resource attributes) Ready Authzed/SpiceDB Ready Keycloak Authorization Services (UMA-compliant Authorization API) In analysis Custom responses Festival Wristbands tokens (token normalization, Edge Authentication Architecture) Ready JSON injection (header injection, Envoy Dynamic Metadata) Ready Plain text value (header injection) Ready Custom response status code/messages (e.g. redirect) Ready Callbacks HTTP endpoints Ready Caching OpenID Connect and User-Managed Access configs Ready JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS) Ready Access tokens Ready External metadata Ready Precompiled Rego policies Ready Policy evaluation Ready Sharding (lookup performance, multitenancy) Ready

For a detailed description of the features above, refer to the Features page.

"},{"location":"authorino/#faq","title":"FAQ","text":"Do I need to deploy Envoy?

Authorino is built from the ground up to work well with Envoy. It is strongly recommended that you leverage Envoy alongside Authorino. That said, it is possible to use Authorino without Envoy.

Authorino implements Envoy's external authorization gRPC protocol and therefore will accept any client request that complies.

Authorino also provides a second interface for raw HTTP authorization, suitable for using with Kubernetes ValidatingWebhook and other integrations (e.g. other proxies).

The only attribute of the authorization request that is strictly required is the host name. (See Host lookup for more information.) The other attributes, such as method, path, headers, etc, might as well be required, depending on each AuthConfig. In the case of the gRPC CheckRequest method, the host is supplied in Attributes.Request.Http.Host and alternatively in Attributes.ContextExtensions[\"host\"]. For raw HTTP authorization requests, the host must be supplied in Host HTTP header.

Check out Kuadrant for easy-to-use Envoy and Authorino deployment & configuration for API management use-cases, using Kubernetes Custom Resources.

Is Authorino an Identity Provider (IdP)?

No, Authorino is not an Identity Provider (IdP). Nor is it an auth server of any kind, such as an OAuth2 server, an OpenID Connect (OIDC) server, or a Single Sign-On (SSO) server.

Authorino is not an identity broker either. It can verify access tokens from multiple trusted sources of identity and protocols, but it will not negotiate authentication flows for non-authenticated access requests. Some tricks nonetheless can be done, for example, to redirect unauthenticated users to a login page.

For an excellent auth server that checks all the boxes above, check out Keycloak.

How does Authorino compare to Keycloak?

Keycloak is a proper auth server and identity provider (IdP). It offers a huge set of features for managing identities, identity sources with multiple user federation options, and a platform for authentication and authorization services.

Keycloak exposes authenticators that implement protocols such as OpenID Connect. This is a one-time flow that establishes the delegation of power to a client, for a short period of time. To be consistent with Zero Trust security, you want a validator to verify the short-lived tokens in every request that tries to reach your protected service/resource. This step, which repeats every time, can avoid heavy lookups into big tables of tokens and leverage cached authorization policies for fast in-memory evaluation. This is where Authorino comes in.

Authorino verifies and validates Keycloak-issued ID tokens. OpenID Connect Discovery is used to request and cache JSON Web Key Sets (JWKS), used to verify the signature of the tokens without having to contact the Keycloak server again, or look up a table of credentials. Moreover, users' long-lived credentials are kept safe, rather than spread in hops across the network.

You can also use Keycloak for storing auth-relevant resource metadata. These can be fetched by Authorino in request-time, to be combined into your authorization policies. See Keycloak Authorization Services and User-Managed Access (UMA) support, as well as Authorino UMA external metadata counter-part.

Why doesn't Authorino handle OAuth flows?

It has to do with trust. OAuth grants are supposed to be negotiated directly between whoever owns the long-lived credentials on one hand (users, service accounts), and the trustworthy auth server that receives those credentials \u2013 ideally with a minimum number of hops in the middle \u2013 and exchanges them for short-lived access tokens, on the other.

There are use-cases for Authorino running in the edge (e.g. Edge Authentication Architecture and token normalization), but in most cases Authorino should be seen as a last-mile component that provides decoupled identity verification and authorization policy enforcement to protected services in request-time. In this sense, the OAuth grant is a pre-flight exchange that happens once and as direct and safe as possible, whereas auth enforcement is kept lightweight and efficient.

Where does Authorino store users and roles?

Authorino does not store users, roles, role bindings, access control lists, or any raw authorization data. Authorino handles policies, where even these policies can be stored elsewhere (as opposed to stated inline inside of an Authorino AuthConfig CR).

Authorino evaluates policies for stateless authorization requests. Any additional context is either resolved from the provided payload or static definitions inside the policies. That includes extracting user information from a JWT or client TLS certificate, requesting user metadata from opaque authentication tokens (e.g. API keys) to the trusted sources actually storing that content, obtaining synchronous HTTP metadata from services, etc.

In the case of authentication with API keys, as well as its derivative to model HTTP Basic Auth, user data are stored in Kubernetes Secrets. The secret's keys, annotations and labels are usually the structures used to organize the data that later a policy evaluated in Authorino may require. Strictly, those are not Authorino data structures.

Can't I just use Envoy JWT Authentication and RBAC filters?

Envoy's JWT Authentication works pretty much similar to Authorino's JOSE/JWT verification and validation for OpenID Connect. In both cases, the JSON Web Key Sets (JWKS) to verify the JWTs are auto-loaded and cached to be used in request-time. Moreover, you can configure for details such as where to extract the JWT from the HTTP request (header, param or cookie) and do some cool tricks regarding how dynamic metadata based on JWT claims can be injected to consecutive filters in the chain.

However, in terms of authorization, while Envoy's implementation essentially allows to check for the list of audiences (aud JWT claim), Authorino opens up for a lot more options such as pattern-matching rules with operators and conditionals, built-in OPA and other methods of evaluating authorization policies.

Authorino also allows combining JWT authentication with other types of authentication to support different sources of identity and groups of users such as API keys, Kubernetes tokens, OAuth opaque tokens, etc.

In summary, Envoy's JWT Authentication and Envoy RBAC filter are excellent solutions for simple use-cases where JWTs from one single issuer is the only authentication method you are planning to support and limited-to-no authorization rules suffice. On the other hand, if you need to integrate more identity sources, different types of authentication, authorization policies, etc, you might want to consider Authorino.

Should I use Authorino if I already have Istio configured?

Istio is a great solution for managing service meshes. It delivers an excellent platform with an interesting layer of abstraction on top of Envoy proxy's virtual omnipresence within the mesh.

There are lots of similarities, but also complementarity between Authorino and Istio and Istio Authorization in special.

Istio provides a simple way to enable features that are, in many cases, features of Envoy, such as authorization based on JWTs, authorization based on attributes of the request, and activation of external authorization services, without having to deal with complex Envoy config files. See Kuadrant for a similar approach, nonetheless leveraging features of Istio as well.

Authorino is an Envoy-compatible external authorization service. One can use Authorino with or without Istio.

In particular, Istio Authorization Policies can be seen, in terms of functionality and expressiveness, as a subset of one type of authorization policies supported by Authorino, the pattern-matching authorization policies. While Istio, however, is heavily focused on specific use cases of API Management, offering a relatively limited list of supported attribute conditions, Authorino is more generic, allowing to express authorization rules for a wider spectrum of use cases \u2013 ACLs, RBAC, ABAC, etc, pretty much counting on any attribute of the Envoy payload, identity object and external metadata available.

Authorino also provides built-in OPA authorization, several other methods of authentication and identity verification (e.g. Kubernetes token validation, API key-based authentication, OAuth token introspection, OIDC-discoverable JWT verification, etc), and features like fetching of external metadata (HTTP services, OIDC userinfo, UMA resource data), token normalization, wristband tokens and dynamic responses. These all can be used independently or combined, in a simple and straightforward Kubernetes-native fashion.

In summary, one might value Authorino when looking for a policy enforcer that offers:

  1. multiple supported methods and protocols for rather hybrid authentication, encompassing future and legacy auth needs;
  2. broader expressiveness and more functionalities for the authorization rules;
  3. authentication and authorization in one single declarative manifest;
  4. capability to fetch auth metadata from external sources on-the-fly;
  5. built-in OPA module;
  6. easy token normalization and/or aiming for Edge Authentication Architecture (EAA).

The good news is that, if you have Istio configured, then you have Envoy and the whole platform for wiring Authorino up if you want to. \ud83d\ude09

Do I have to learn OPA/Rego language to use Authorino?

No, you do not. However, if you are comfortable with Rego from Open Policy Agent (OPA), there are some quite interesting things you can do in Authorino, just as you would in any OPA server or OPA plugin, but leveraging Authorino's built-in OPA module instead. Authorino's OPA module is compiled as part of Authorino's code directly from the Golang packages, and imposes no extra latency to the evaluation of your authorization policies. Even the policies themselves are pre-compiled in reconciliation-time, for fast evaluation afterwards, in request-time.

On the other hand, if you do not want to learn Rego or in any case would like to combine it with declarative and Kubernetes-native authN/authZ spec for your services, Authorino does complement OPA with at least two other methods for expressing authorization policies \u2013 i.e. pattern-matching authorization and Kubernetes SubjectAccessReview, the latter allowing to rely completely on the Kubernetes RBAC.

You can break down, mix and combine these methods and technologies in as many authorization policies as you want, potentially applying them according to specific conditions. Authorino will trigger the evaluation of concurrent policies in parallel, aborting the context if any of the processes denies access.

Authorino also packages well-established industry standards and protocols for identity verification (JOSE/JWT validation, OAuth token introspection, Kubernetes TokenReview) and ad-hoc request-time metadata fetching (OIDC userinfo, User-Managed Access (UMA)), and corresponding layers of caching, without which such functionalities would have to be implemented by code.

Can I use Authorino to protect non-REST APIs?

Yes, you can. In principle, the API format (REST, gRPC, GraphQL, etc) should not matter for the authN/authZ enforcer. There are a couple points to consider though.

While REST APIs are designed in a way that, in most cases, information usually needed for the evaluation of authorization policies are available in the metadata of the HTTP request (method, path, headers), other API formats quite often will require processing of the HTTP body. By default, Envoy's external authorization HTTP filter will not forward the body of the request to Authorino; to change that, enable the with_request_body option in the Envoy configuration for the external authorization filter. E.g.:

with_request_body:\n  max_request_bytes: 1024\n  allow_partial_message: true\n  pack_as_bytes: true\n

Additionally, when enabling the request body passed in the payload to Authorino, parsing of the content should be of concern as well. Authorino provides easy access to attributes of the HTTP request, parsed as part of the Authorization JSON, however the body of the request is passed as string and should be parsed by the user according to each case.

Check out Authorino OPA authorization and the Rego Encoding functions for options to parse serialized JSON, YAML and URL-encoded params. For XML transformation, an external parsing service connected via Authorino's HTTP GET/GET-by-POST external metadata might be required.

Can I run Authorino other than on Kubernetes?

As of today, no, you cannot, or at least it wouldn't suit production requirements.

Do I have to be admin of the cluster to install Authorino?

To install the Authorino Custom Resource Definition (CRD) and to define cluster roles required by the Authorino service, admin privilege to the Kubernetes cluster is required. This step happens only once per cluster and is usually equivalent to installing the Authorino Operator.

Thereafter, deploying instances of the Authorino service and applying AuthConfig custom resources to a namespace depend on the permissions set by the cluster administrator \u2013 either directly by editing the bindings in the cluster's RBAC, or via options of the operator. In most cases, developers will be granted permissions to create and manage AuthConfigs, and sometimes to deploy their own instances of Authorino.

Is it OK to store AuthN/AuthZ configs as Kubernetes objects?

Authorino's API checks all the bullets to be aggregated to the Kubernetes cluster APIs, and therefore using Custom Resource Definition (CRD) and the Operator pattern has always been an easy design decision.

By merging the definitions of service authN/authZ to the control plane, Authorino AuthConfig resources can be thought as extensions of the specs of the desired state of services regarding the data flow security. The Authorino custom controllers, built-in into the authorization service, are the agents that read from that desired state and reconcile the processes operating in the data plane.

Authorino is declarative and seamless for developers and cluster administrators managing the state of security of the applications running in the server, who are used to tools such as kubectl, the Kubernetes UI and its dashboards. Instead of learning about yet another configuration API format, Authorino users can jump straight to applying and editing YAML or JSON structures they already know, in a way that things such as spec, status, namespace and labels have the meaning they are expected to have, and docs are as close as kubectl explain. Moreover, Authorino does not pile up any other redundant layers of APIs, event-processing, RBAC, transformation and validation webhooks, etc. It is Kubernetes at its best.

In terms of scale, Authorino AuthConfigs should grow proportionally to the number of protected services, virtually limited by nothing but the Kubernetes API data storage, while namespace division and label selectors help adjust horizontally and keep distributed.

In other words, there are lots of benefits of using Kubernetes custom resources and custom controllers, and unless you are planning on bursting your server with more services than it can keep record of, it is totally \ud83d\udc4d to store your AuthN/AuthZ configs as cluster API objects.

Can I use Authorino for rate limiting?

You can, but you shouldn't. Check out instead Limitador, for simple and efficient global rate limiting. Combine it with Authorino and Authorino's support for Envoy Dynamic Metadata for authenticated rate limiting.

"},{"location":"authorino/#benchmarks","title":"Benchmarks","text":"

Configuration of the tests (Authorino features):

Performance test Identity Metadata Authorization Response ReconcileAuthConfig OIDC/JWT UserInfo, UMA OPA(inline Rego) - AuthPipeline OIDC/JWT - JSON pattern-matching(JWT claim check) - APIKeyAuthn API key N/A N/A N/A JSONPatternMatchingAuthz N/A N/A JSON pattern-matching N/A OPAAuthz N/A N/A OPA(inline Rego) N/A

Platform: linux/amd64 CPU: Intel\u00ae Xeon\u00ae Platinum 8370C 2.80GHz Cores: 1, 4, 10

Results:

ReconcileAuthConfig:\n\n        \u2502   sec/op    \u2502     B/op     \u2502  allocs/op  \u2502\n\n*         1.533m \u00b1 2%   264.4Ki \u00b1 0%   6.470k \u00b1 0%\n*-4       1.381m \u00b1 6%   264.5Ki \u00b1 0%   6.471k \u00b1 0%\n*-10      1.563m \u00b1 5%   270.2Ki \u00b1 0%   6.426k \u00b1 0%\ngeomean   1.491m        266.4Ki        6.456k\n\nAuthPipeline:\n\n        \u2502   sec/op    \u2502     B/op     \u2502 allocs/op  \u2502\n\n*         388.0\u00b5 \u00b1 2%   80.70Ki \u00b1 0%   894.0 \u00b1 0%\n*-4       348.4\u00b5 \u00b1 5%   80.67Ki \u00b1 2%   894.0 \u00b1 3%\n*-10      356.4\u00b5 \u00b1 2%   78.97Ki \u00b1 0%   860.0 \u00b1 0%\ngeomean   363.9\u00b5        80.11Ki        882.5\n\nAPIKeyAuthn:\n\n        \u2502   sec/op    \u2502    B/op      \u2502 allocs/op  \u2502\n\n*         3.246\u00b5 \u00b1 1%   480.0 \u00b1 0%     6.000 \u00b1 0%\n*-4       3.111\u00b5 \u00b1 0%   480.0 \u00b1 0%     6.000 \u00b1 0%\n*-10      3.091\u00b5 \u00b1 1%   480.0 \u00b1 0%     6.000 \u00b1 0%\ngeomean   3.148\u00b5        480.0          6.000\n\nOPAAuthz vs JSONPatternMatchingAuthz:\n\n        \u2502   OPAAuthz   \u2502      JSONPatternMatchingAuthz       \u2502\n        \u2502    sec/op    \u2502   sec/op     vs base                \u2502\n\n*         87.469\u00b5 \u00b1 1%   1.797\u00b5 \u00b1 1%  -97.95% (p=0.000 n=10)\n*-4       95.954\u00b5 \u00b1 3%   1.766\u00b5 \u00b1 0%  -98.16% (p=0.000 n=10)\n*-10      96.789\u00b5 \u00b1 4%   1.763\u00b5 \u00b1 0%  -98.18% (p=0.000 n=10)\ngeomean    93.31\u00b5        1.775\u00b5       -98.10%\n\n        \u2502   OPAAuthz    \u2502      JSONPatternMatchingAuthz      \u2502\n        \u2502     B/op      \u2502    B/op     vs base                \u2502\n\n*         28826.00 \u00b1 0%   64.00 \u00b1 0%  -99.78% (p=0.000 n=10)\n*-4       28844.00 \u00b1 0%   64.00 \u00b1 0%  -99.78% (p=0.000 n=10)\n*-10      28862.00 \u00b1 0%   64.00 \u00b1 0%  -99.78% (p=0.000 n=10)\ngeomean    28.17Ki        64.00       -99.78%\n\n    
    \u2502   OPAAuthz   \u2502      JSONPatternMatchingAuthz      \u2502\n        \u2502  allocs/op   \u2502 allocs/op   vs base                \u2502\n\n*         569.000 \u00b1 0%   2.000 \u00b1 0%  -99.65% (p=0.000 n=10)\n*-4       569.000 \u00b1 0%   2.000 \u00b1 0%  -99.65% (p=0.000 n=10)\n*-10      569.000 \u00b1 0%   2.000 \u00b1 0%  -99.65% (p=0.000 n=10)\ngeomean     569.0        2.000       -99.65%\n

"},{"location":"authorino/#contributing","title":"Contributing","text":"

If you are interested in contributing to Authorino, please refer to the Developer's guide for info about the stack and requirements, workflow, policies and Code of Conduct.

Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.

"},{"location":"authorino/#license","title":"License","text":""},{"location":"authorino/docs/","title":"Documentation","text":""},{"location":"authorino/docs/#getting-started","title":"Getting started","text":""},{"location":"authorino/docs/#terminology","title":"Terminology","text":""},{"location":"authorino/docs/#architecture","title":"Architecture","text":""},{"location":"authorino/docs/#feature-description","title":"Feature description","text":""},{"location":"authorino/docs/#user-guides","title":"User guides","text":""},{"location":"authorino/docs/#developers-guide","title":"Developer\u2019s guide","text":""},{"location":"authorino/docs/architecture/","title":"Architecture","text":""},{"location":"authorino/docs/architecture/#overview","title":"Overview","text":"

There are a few concepts to understand Authorino's architecture. The main components are: Authorino, Envoy and the Upstream service to be protected. Envoy proxies requests to the configured virtual host upstream service, first contacting Authorino to decide on authN/authZ.

The topology can vary from centralized proxy and centralized authorization service, to dedicated sidecars, with the nuances in between. Read more about the topologies in the Topologies section below.

Authorino is deployed using the Authorino Operator, from an Authorino Kubernetes custom resource. Then, from another kind of custom resource, the AuthConfig CRs, each Authorino instance reads and adds to the index the exact rules of authN/authZ to enforce for each protected host (\"index reconciliation\").

Everything that the AuthConfig reconciler can fetch in reconciliation-time is stored in the index. This is the case of static parameters such as signing keys, authentication secrets and authorization policies from external policy registries.

AuthConfigs can refer to identity providers (IdP) and trusted auth servers whose access tokens will be accepted to authenticate to the protected host. Consumers obtain an authentication token (short-lived access token or long-lived API key) and send those in the requests to the protected service.

When Authorino is triggered by Envoy via the gRPC interface, it starts evaluating the Auth Pipeline, i.e. it applies to the request the parameters to verify the identity and to enforce authorization, as found in the index for the requested host (See host lookup for details).

Apart from static rules, these parameters can include instructions to contact online with external identity verifiers, external sources of metadata and policy decision points (PDPs).

On every request, Authorino's \"working memory\" is called Authorization JSON, a data structure that holds information about the context (the HTTP request) and objects from each phase of the auth pipeline: i.e., authentication verification (phase i), ad-hoc metadata fetching (phase ii), authorization policy enforcement (phase iii), dynamic response (phase iv), and callbacks (phase v). The evaluators in each of these phases can both read and write from the Authorization JSON for dynamic steps and decisions of authN/authZ.

"},{"location":"authorino/docs/architecture/#topologies","title":"Topologies","text":"

Typically, upstream APIs are deployed to the same Kubernetes cluster and namespace where the Envoy proxy and Authorino is running (although not necessarily). Whatever is the case, Envoy must be proxying to the upstream API (see Envoy's HTTP route components and virtual hosts) and pointing to Authorino in the external authorization filter.

This can be achieved with different topologies:

  • Envoy can be a centralized gateway with one dedicated instance of Authorino, proxying to one or more upstream services
  • Envoy can be deployed as a sidecar of each protected service, but still contacting from a centralized Authorino authorization service
  • Both Envoy and Authorino deployed as sidecars of the protected service, restricting all communication between them to localhost

Each topology above induces different measures for security.

"},{"location":"authorino/docs/architecture/#centralized-gateway","title":"Centralized gateway","text":"

It is recommended that the protected services validate the origin of the traffic: it must have been proxied by Envoy. See Authorino JSON injection for an extra validation option using a shared secret passed in an HTTP header.

"},{"location":"authorino/docs/architecture/#centralized-authorization-service","title":"Centralized authorization service","text":"

Protected service should only listen on localhost and all traffic can be considered safe.

"},{"location":"authorino/docs/architecture/#sidecars","title":"Sidecars","text":"

Namespaced instances of Authorino are recommended, with fine-grained label selectors to avoid unnecessary caching of AuthConfigs.

Apart from that, protected service should only listen on localhost and all traffic can be considered safe.

"},{"location":"authorino/docs/architecture/#cluster-wide-vs-namespaced-instances","title":"Cluster-wide vs. Namespaced instances","text":"

Authorino instances can run in either cluster-wide or namespaced mode.

Namespace-scoped instances only watch resources (AuthConfigs and Secrets) created in a given namespace. This deployment mode does not require admin privileges over the Kubernetes cluster to deploy the instance of the service (given Authorino's CRDs have been installed beforehand, such as when Authorino is installed using the Authorino Operator).

Cluster-wide deployment mode, in contraposition, deploys instances of Authorino that watch resources across the entire cluster, consolidating all resources into a multi-namespace index of auth configs. Admin privileges over the Kubernetes cluster are required to deploy Authorino in cluster-wide mode.

Be careful to avoid superposition when combining multiple Authorino instances and instance modes in the same Kubernetes cluster. Apart from caching unnecessary auth config data in the instances depending on your routing settings, the leaders of each instance (set of replicas) may compete for updating the status of the custom resources that are reconciled. See Resource reconciliation and status update for more information.

If necessary, use label selectors to narrow down the space of resources watched and reconciled by each Authorino instance. Check out the Sharding section below for details.

"},{"location":"authorino/docs/architecture/#the-authorino-authconfig-custom-resource-definition-crd","title":"The Authorino AuthConfig Custom Resource Definition (CRD)","text":"

The desired protection for a service is declaratively stated by applying an AuthConfig Custom Resource to the Kubernetes cluster running Authorino.

An AuthConfig resource typically looks like the following:

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n    # The list of public host names of the services protected by this AuthConfig resource.\n    # Authorino uses the host name provided in the payload of external authorization request to lookup for the corresponding AuthConfig to enforce.\n  # Hostname collisions are prevented by rejecting to index a hostname already taken by another AuthConfig.\n  # Format: hostname[:port]\n  hosts:\n\n  - my-api.io:443 # north-south traffic\n  - my-api.ns.svc.cluster.local # east-west traffic\n\n  # Set of stored named patterns to be reused in conditions and pattern-matching authorization rules\n  patterns: {\"name\" \u2192 {selector, operator, value}, \u2026}\n\n  # Top-level conditions for the AuthConfig to be enforced.\n  # If omitted, the AuthConfig will be enforced at all requests.\n  # If present, all conditions must match for the AuthConfig to be enforced; otherwise, Authorino skips the AuthConfig and returns to the auth request with status OK.\n  when: [{selector, operator, value | named pattern ref}, \u2026]\n\n  # List of one or more trusted sources of identity:\n  # - Configurations to verify JSON Web Tokens (JWTs) issued by an OpenID Connect (OIDC) server\n  # - Endpoints for OAuth 2.0 token introspection\n  # - Attributes for the Kubernetes `TokenReview` API\n  # - Label selectors for API keys (stored in Kubernetes `Secret`s)\n  # - Label selectors trusted x509 issuer certificates (stored in Kubernetes `Secret`s)\n  # - Selectors for plain identity objects supplied in the payload of the authorization request\n  # - Anonymous access configs\n  authentication: {\"name\" \u2192 {\u2026}, \u2026}\n\n  # List of sources of external metadata for the authorization (optional):\n  # - Endpoints for HTTP GET or GET-by-POST requests\n  # - OIDC UserInfo endpoints (associated with an OIDC token issuer specified in the authentication configs)\n  # - User-Managed Access (UMA) 
resource registries\n  metadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n  # List of authorization policies to be enforced (optional):\n  # - Pattern-matching rules (e.g. `context.request.http.path eq '/pets'`)\n  # - Open Policy Agent (OPA) inline or external Rego policies\n  # - Attributes for the Kubernetes `SubjectAccessReview` API\n  # \u2013 Attributes for authorization with an external SpiceDB server\n  authorization: {\"name\" \u2192 {\u2026}, \u2026}\n\n  # Customization to the response to the external authorization request (optional)\n  response:\n    # List of dynamic response elements into the request on success authoization (optional):\n    # - Plain text\n    # - JSON objects\n    # - Festival Wristbands (signed JWTs issued by Authorino)\n    success:\n      # List of HTTP headers to inject into the request post-authorization (optional):\n      headers: {\"name\" \u2192 {\u2026}, \u2026}\n\n      # List of Envoy Dynamic Metadata to inject into the request post-authorization (optional):\n      dynamicMetadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n    # Custom HTTP status code, message and headers to replace the default `401 Unauthorized` response (optional)\n    unauthenticated:\n      code: 302\n      message: Redirecting to login\n      headers:\n        \"Location\":\n          value: https://my-app.io/login\n\n    # Custom HTTP status code, message and headers to replace the default `and `403 Forbidden` response (optional)\n    unauthorized: {code, message, headers, body}\n\n  # List of callback targets:\n  # - Endpoints for HTTP requests\n  callbacks: {\"name\" \u2192 {\u2026}, \u2026}\n

Check out the OAS of the AuthConfig CRD for a formal specification of the options for authentication verification, external metadata fetching, authorization policies, and dynamic response, as well as any other host protection capability implemented by Authorino.

You can also read the specification from the CLI using the kubectl explain command. The Authorino CRD is required to have been installed in Kubernetes cluster. E.g. kubectl explain authconfigs.spec.authentication.overrides.

A complete description of supported features and corresponding configuration options within an AuthConfig CR can be found in the Features page.

More concrete examples of AuthConfigs for specific use-cases can be found in the User guides.

"},{"location":"authorino/docs/architecture/#resource-reconciliation-and-status-update","title":"Resource reconciliation and status update","text":"

The instances of the Authorino authorization service workload, following the Operator pattern, watch events related to the AuthConfig custom resources, to build and reconcile an in-memory index of configs. Whenever a replica receives traffic for authorization request, it looks up in the index of AuthConfigs and then triggers the \"Auth Pipeline\", i.e. enforces the associated auth spec onto the request.

An instance can be a single authorization service workload or a set of replicas. All replicas watch and reconcile the same set of resources that match the --auth-config-label-selector and --secret-label-selector configuration options. (See both Cluster-wide vs. Namespaced instances and Sharding, for details about defining the reconciliation space of Authorino instances.)

The above means that all replicas of an Authorino instance should be able to receive traffic for authorization requests.

Among the multiple replicas of an instance, Authorino elects one replica to be leader. The leader is responsible for updating the status of reconciled AuthConfigs. If the leader eventually becomes unavailable, the instance will automatically elect another replica to take its place as the new leader.

The status of an AuthConfig tells whether the resource is \"ready\" (i.e. indexed). It also includes summary information regarding the numbers of authentication configs, metadata configs, authorization configs and response configs within the spec, as well as whether Festival Wristband tokens are being issued by the Authorino instance as by spec.

Apart from watching events related to AuthConfig custom resources, Authorino also watches events related to Kubernetes Secrets, as part of Authorino's API key authentication feature. Secret resources that store API keys are linked to their corresponding AuthConfigs in the index. Whenever the Authorino instance detects a change in the set of API key Secrets linked to an AuthConfig, the instance reconciles the index.

Authorino only watches events related to Secrets whose metadata.labels match the label selector --secret-label-selector of the Authorino instance. The default value of the label selector for Kubernetes Secrets representing Authorino API keys is authorino.kuadrant.io/managed-by=authorino.

"},{"location":"authorino/docs/architecture/#the-auth-pipeline-aka-enforcing-protection-in-request-time","title":"The \"Auth Pipeline\" (aka: enforcing protection in request-time)","text":"

In each request to the protected API, Authorino triggers the so-called \"Auth Pipeline\", a set of configured evaluators that are organized in a 5-phase pipeline:

  • (i) Authentication phase: at least one source of identity (i.e., one authentication config) must resolve the supplied credential in the request into a valid identity or Authorino will otherwise reject the request as unauthenticated (401 HTTP response status).
  • (ii) Metadata phase: optional fetching of additional data from external sources, to add up to context and identity information, and used in authorization policies, dynamic responses and callback requests (phases iii to v).
  • (iii) Authorization phase: all unskipped policies must evaluate to a positive result (\"authorized\"), or Authorino will otherwise reject the request as unauthorized (403 HTTP response code).
  • (iv) Response phase \u2013 Authorino builds all user-defined response items (dynamic JSON objects and/or Festival Wristband OIDC tokens), which are supplied back to the external authorization client within added HTTP headers or as Envoy Dynamic Metadata
  • (v) Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints.

Each phase is sequential to the other, from (i) to (v), while the evaluators within each phase are triggered concurrently or as prioritized. The Authentication phase (i) is the only one required to list at least one evaluator (i.e. 1+ authentication configs); Metadata, Authorization and Response phases can have any number of evaluators (including zero, and even be omitted in this case).

"},{"location":"authorino/docs/architecture/#host-lookup","title":"Host lookup","text":"

Authorino reads the request host from Attributes.Http.Host of Envoy's CheckRequest type, and uses it as key to lookup in the index of AuthConfigs, matched against spec.hosts.

Alternatively to Attributes.Http.Host, a host entry can be supplied in the Attributes.ContextExtensions map of the external authorization request. This will take precedence over the host attribute of the HTTP request.

The host context extension is useful to support use cases such as path prefix-based lookup and wildcard subdomains lookup, with the lookup strongly dictated by the external authorization client (e.g. Envoy), which often knows about routing and the expected AuthConfig to enforce beyond what Authorino can infer strictly based on the host name.

Wildcards can also be used in the host names specified in the AuthConfig, resolved by Authorino. E.g. if *.pets.com is in spec.hosts, Authorino will match the concrete host names dogs.pets.com, cats.pets.com, etc. In case of multiple possible matches, Authorino will try the longest match first (in terms of host name labels) and fall back to the closest wildcard upwards in the domain tree (if any).

When more than one host name is specified in the AuthConfig, all of them can be used as key, i.e. all of them can be requested in the authorization request and will be mapped to the same config.

Example. Host lookup with wildcards.

The domain tree above induces the following relation:

  • foo.nip.io \u2192 authconfig-1 (matches *.io)
  • talker-api.nip.io \u2192 authconfig-2 (matches talker-api.nip.io)
  • dogs.pets.com \u2192 authconfig-2 (matches *.pets.com)
  • api.acme.com \u2192 authconfig-3 (matches api.acme.com)
  • www.acme.com \u2192 authconfig-4 (matches *.acme.com)
  • foo.org \u2192 404 Not found

The host can include the port number (i.e. hostname:port) or it can be just the name of the host name. Authorino will first try finding in the index a config associated to hostname:port, as supplied in the authorization request; if the index misses an entry for hostname:port, Authorino will then remove the :port suffix and repeat the lookup using just hostname as key. This provides implicit support for multiple port numbers for a same host without having to list all combinations in the AuthConfig.

"},{"location":"authorino/docs/architecture/#avoiding-host-name-collision","title":"Avoiding host name collision","text":"

Authorino tries to prevent host name collision between AuthConfigs by refusing to link in the index any AuthConfig and host name if the host name is already linked to a different AuthConfig in the index. This was intentionally designed to prevent users from superseding each other's AuthConfigs, partially or fully, by just picking the same host names or overlapping host names as others.

When wildcards are involved, a host name that matches a host wildcard already linked in the index to another AuthConfig will be considered taken, and therefore the newer AuthConfig will not be linked to that host.

This behavior can be disabled to allow AuthConfigs to partially supersede each others' host names (limited to strict host subsets), by supplying the --allow-superseding-host-subsets command-line flag when running the Authorino instance.

"},{"location":"authorino/docs/architecture/#the-authorization-json","title":"The Authorization JSON","text":"

On every Auth Pipeline, Authorino builds the Authorization JSON, a \"working-memory\" data structure composed of context (information about the request, as supplied by the Envoy proxy to Authorino) and auth (objects resolved in phases (i) to (v) of the pipeline). The evaluators of each phase can read from the Authorization JSON and implement dynamic properties and decisions based on its values.

At phase (iii), the authorization evaluators count on an Authorization JSON payload that looks like the following:

// The authorization JSON combined along Authorino's auth pipeline for each request\n{\n  \"context\": { // the input from the proxy\n    \"origin\": {\u2026},\n    \"request\": {\n      \"http\": {\n        \"method\": \"\u2026\",\n        \"headers\": {\u2026},\n        \"path\": \"/\u2026\",\n        \"host\": \"\u2026\",\n        \u2026\n      }\n    }\n  },\n  \"auth\": {\n    \"identity\": {\n      // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n    },\n    \"metadata\": {\n      // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n    }\n  }\n}\n

The policies evaluated can use any data from the authorization JSON to define authorization rules.

After phase (iii), Authorino appends to the authorization JSON the results of this phase as well, and the payload available for phase (iv) becomes:

// The authorization JSON combined along Authorino's auth pipeline for each request\n{\n  \"context\": { // the input from the proxy\n    \"origin\": {\u2026},\n    \"request\": {\n      \"http\": {\n        \"method\": \"\u2026\",\n        \"headers\": {\u2026},\n        \"path\": \"/\u2026\",\n        \"host\": \"\u2026\",\n        \u2026\n      }\n    }\n  },\n  \"auth\": {\n    \"identity\": {\n      // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n    },\n    \"metadata\": {\n      // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n    },\n    \"authorization\": {\n      // each authorization policy result resolved by the evaluators of phase (iii), by name of the evaluator\n    }\n  }\n}\n

Festival Wristbands and Dynamic JSON responses can include dynamic values (custom claims/properties) fetched from the authorization JSON. These can be returned to the external authorization client in added HTTP headers or as Envoy Well Known Dynamic Metadata. Check out Custom response features for details.

For information about reading and fetching data from the Authorization JSON (syntax, functions, etc), check out Common Expression Language (CEL).

"},{"location":"authorino/docs/architecture/#raw-http-authorization-interface","title":"Raw HTTP Authorization interface","text":"

Besides providing the gRPC authorization interface \u2013 that implements the Envoy gRPC authorization server \u2013, Authorino also provides another interface for raw HTTP authorization. This second interface responds to GET and POST HTTP requests sent to :5001/check, and is suitable for other forms of integration, such as:

  • using Authorino as Kubernetes ValidatingWebhook service (example);
  • other HTTP proxies and API gateways;
  • old versions of Envoy incompatible with the latest version of gRPC external authorization protocol (Authorino is based on v3.19.1 of Envoy external authorization API)

In the raw HTTP interface, the host used to lookup for an AuthConfig must be supplied in the Host HTTP header of the request. Other attributes of the HTTP request are also passed in the context to evaluate the AuthConfig, including the body of the request.

"},{"location":"authorino/docs/architecture/#caching","title":"Caching","text":""},{"location":"authorino/docs/architecture/#openid-connect-and-user-managed-access-configs","title":"OpenID Connect and User-Managed Access configs","text":"

OpenID Connect and User-Managed Access configurations, discovered usually at reconciliation-time from well-known discovery endpoints.

Cached individual OpenID Connect configurations discovered by Authorino can be configured to be auto-refreshed, by setting the corresponding spec.authentication.jwt.ttl field in the AuthConfig (given in seconds, default: 0 \u2013 i.e. no cache update).

"},{"location":"authorino/docs/architecture/#json-web-keys-jwks-and-json-web-key-sets-jwks","title":"JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS)","text":"

JSON signature verification certificates linked by discovered OpenID Connect configurations, fetched usually at reconciliation-time.

"},{"location":"authorino/docs/architecture/#revoked-access-tokens","title":"Revoked access tokens","text":"Not implemented - In analysis (#19)

Caching of access tokens identified and or notified as revoked prior to expiration.

"},{"location":"authorino/docs/architecture/#external-metadata","title":"External metadata","text":"Not implemented - Planned (#21)

Caching of resource data obtained in previous requests.

"},{"location":"authorino/docs/architecture/#compiled-rego-policies","title":"Compiled Rego policies","text":"

Performed automatically by Authorino at reconciliation-time for the authorization policies based on the built-in OPA module.

Precompiled and cached individual Rego policies originally pulled by Authorino from external registries can be configured to be auto-refreshed, by setting the corresponding spec.authorization.opa.externalRegistry.ttl field in the AuthConfig (given in seconds, default: 0 \u2013 i.e. no cache update).

"},{"location":"authorino/docs/architecture/#repeated-requests","title":"Repeated requests","text":"Not implemented - In analysis (#20)

For consecutive requests performed, within a given period of time, by the same user requesting the same resource, such that the result of the auth pipeline can be proven not to change.

"},{"location":"authorino/docs/architecture/#sharding","title":"Sharding","text":"

By default, Authorino instances will watch AuthConfig CRs in the entire space (namespace or entire cluster; see Cluster-wide vs. Namespaced instances for details). To support combining multiple Authorino instances and instance modes in the same Kubernetes cluster, and yet avoiding superposition between the instances (i.e. multiple instances reconciling the same AuthConfigs), Authorino offers support for data sharding, i.e. to horizontally narrow down the space of reconciliation of an Authorino instance to a subset of that space.

The benefits of limiting the space of reconciliation of an Authorino instance include avoiding unnecessary caching and workload in instances that do not receive corresponding traffic (according to your routing settings) and preventing leaders of multiple instances (sets of replicas) from competing over resource status updates (see Resource reconciliation and status update for details).

Use-cases for sharding of AuthConfigs:

  • Horizontal load balancing of traffic of authorization requests
  • Support for managed centralized instances of Authorino to API owners who create and maintain their own AuthConfigs within their own user namespaces.

Authorino's custom controllers filter the AuthConfig-related events to be reconciled using Kubernetes label selectors, defined for the Authorino instance via --auth-config-label-selector command-line flag. By default, --auth-config-label-selector is empty, meaning all AuthConfigs in the space are watched; this variable can be set to any value parseable as a valid label selector, causing Authorino to then watch only events of AuthConfigs whose metadata.labels match the selector.

The following are all valid examples of AuthConfig label selector filters:

--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino,other-label=other-value\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by in (authorino,kuadrant)\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by!=authorino-v0.4\"\n--auth-config-label-selector=\"!disabled\"\n
"},{"location":"authorino/docs/architecture/#rbac","title":"RBAC","text":"

The table below describes the roles and role bindings defined by the Authorino service:

Role Kind Scope(*) Description Permissions authorino-manager-role ClusterRole C/N Role of the Authorino manager service Watch and reconcile AuthConfigs and Secrets authorino-manager-k8s-auth-role ClusterRole C/N Role for the Kubernetes auth features Create TokenReviews and SubjectAccessReviews (Kubernetes auth) authorino-leader-election-role Role N Leader election role Create/update the ConfigMap used to coordinate which replica of Authorino is the leader authorino-authconfig-editor-role ClusterRole - AuthConfig editor R/W AuthConfigs; Read AuthConfig/status authorino-authconfig-viewer-role ClusterRole - AuthConfig viewer Read AuthConfigs and AuthConfig/status authorino-proxy-role ClusterRole C/N Kube-rbac-proxy-role (sidecar)'s role Create TokenReviews and SubjectAccessReviews to check permissions to the /metrics endpoint authorino-metrics-reader ClusterRole - Metrics reader GET /metrics

(*) C - Cluster-wide | N - Authorino namespace | C/N - Cluster-wide or Authorino namespace (depending on the deployment mode).

"},{"location":"authorino/docs/architecture/#observability","title":"Observability","text":"

Please refer to the Observability user guide for info on Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.

"},{"location":"authorino/docs/code_of_conduct/","title":"Code of conduct","text":""},{"location":"authorino/docs/code_of_conduct/#code-of-conduct","title":"Code of Conduct","text":"

Authorino follows the Kuadrant Community Code of Conduct, which is based on the CNCF Code of Conduct.

Please refer to this page for a description of the standards and values we stand for in our relationship with the community.

"},{"location":"authorino/docs/contributing/","title":"Developer's Guide","text":""},{"location":"authorino/docs/contributing/#technology-stack-for-developers","title":"Technology stack for developers","text":"

Minimum requirements to contribute to Authorino are:

  • Golang v1.21+
  • Docker

Authorino's code was originally bundled using the Operator SDK (v1.9.0).

The following tools can be installed as part of the development workflow:

  • Installed with go install to the $PROJECT_DIR/bin directory:

    • controller-gen: for building custom types and manifests
    • Kustomize: for assembling flavoured manifests and installing/deploying
    • setup-envtest: for running the tests \u2013 extra tools installed to ./testbin
    • benchstat: for human-friendly test benchmark reports
    • mockgen: to generate mocks for tests \u2013 e.g. ./bin/mockgen -source=pkg/auth/auth.go -destination=pkg/auth/mocks/mock_auth.go
    • Kind: for deploying a containerized Kubernetes cluster for integration testing purposes
  • Other recommended tools to have installed:

    • jq
    • yq
    • gnu-sed
"},{"location":"authorino/docs/contributing/#workflow","title":"Workflow","text":""},{"location":"authorino/docs/contributing/#check-the-issues","title":"Check the issues","text":"

Start by checking the list of issues in GitHub.

In case you want to contribute with an idea for enhancement, a bug fix, or question, please make sure to describe the issue so we can start a conversation together and help you find the best way to get your contribution merged.

"},{"location":"authorino/docs/contributing/#clone-the-repo-and-setup-the-local-environment","title":"Clone the repo and setup the local environment","text":"

Fork/clone the repo:

git clone git@github.com:kuadrant/authorino.git && cd authorino\n

Download the Golang dependencies:

make vendor\n

For additional automation provided, check:

make help\n
"},{"location":"authorino/docs/contributing/#make-your-changes","title":"Make your changes","text":"

Good changes...

  • follow the Golang conventions
  • have proper test coverage
  • address corresponding updates to the docs
  • help us fix wherever we failed to do the above \ud83d\ude1c
"},{"location":"authorino/docs/contributing/#run-the-tests","title":"Run the tests","text":"

To run the tests:

make test\n
"},{"location":"authorino/docs/contributing/#try-locally","title":"Try locally","text":""},{"location":"authorino/docs/contributing/#build-deploy-and-try-authorino-in-a-local-cluster","title":"Build, deploy and try Authorino in a local cluster","text":"

The following command will:

  • Start a local Kubernetes cluster (using Kind)
  • Install cert-manager in the cluster
  • Install the Authorino Operator and Authorino CRDs
  • Build an image of Authorino based on the current branch
  • Push the freshly built image to the cluster's registry
  • Generate TLS certificates for the Authorino service
  • Deploy an instance of Authorino
  • Deploy the example application Talker API, a simple HTTP API that echoes back whatever it gets in the request
  • Setup Envoy for proxying to the Talker API and using Authorino for external authorization
make local-setup\n

You will be prompted to edit the Authorino custom resource.

The main workload composed of Authorino instance and user apps (Envoy, Talker API) will be deployed to the default Kubernetes namespace.

Once the deployment is ready, you can forward the requests on port 8000 to the Envoy service

kubectl port-forward deployment/envoy 8000:8000 &\n
Pro tips
  1. Change the default workload namespace by supplying the NAMESPACE argument to your make local-setup and other deployment, apps and local cluster related targets. If the namespace does not exist, it will be created.
  2. Switch to TLS disabled by default when deploying locally by supplying TLS_ENABLED=0 to your make local-setup and make deploy commands. E.g. make local-setup TLS_ENABLED=0.
  3. Skip being prompted to edit the Authorino CR and default to an Authorino deployment with TLS enabled, debug/development log level/mode, and standard name 'authorino', by supplying FF=1 to your make local-setup and make deploy commands. E.g. make local-setup FF=1
  4. Supply DEPLOY_IDPS=1 to make local-setup and make user-apps to deploy Keycloak and Dex to the cluster. DEPLOY_KEYCLOAK and DEPLOY_DEX are also available. Read more about additional tools for specific use cases in the section below.
  5. Saving the ID of the process (PID) of the port-forward command spawned in the background can be useful to later kill and restart the process. E.g. kubectl port-forward deployment/envoy 8000:8000 &;PID=$!; then kill $PID.
"},{"location":"authorino/docs/contributing/#additional-tools-for-specific-use-cases","title":"Additional tools (for specific use-cases)","text":"Limitador

To deploy Limitador \u2013 pre-configured in Envoy for rate-limiting the Talker API to 5 hits per minute per user_id when available in the cluster workload \u2013, run:

kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
Keycloak

Authorino examples include a bundle of Keycloak preloaded with the following realm setup:

  • Admin console: http://localhost:8080/admin (admin/p)
  • Preloaded realm: kuadrant
  • Preloaded clients:
    • demo: to which API consumers delegate access and therefore the one which access tokens are issued to
    • authorino: used by Authorino to fetch additional user info with client_credentials grant type
    • talker-api: used by Authorino to fetch UMA-protected resource data associated with the Talker API
  • Preloaded resources:
    • /hello
    • /greetings/1 (owned by user john)
    • /greetings/2 (owned by user jane)
    • /goodbye
  • Realm roles:
    • member (default to all users)
    • admin
  • Preloaded users:
    • john/p (member)
    • jane/p (admin)
    • peter/p (member, email not verified)

To deploy, run:

kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

Forward local requests to the instance of Keycloak running in the cluster:

kubectl port-forward deployment/keycloak 8080:8080 &\n
Dex

Authorino examples include a bundle of Dex preloaded with the following setup:

  • Preloaded clients:
    • demo: to which API consumers delegate access and therefore the one which access tokens are issued to (Client secret: aaf88e0e-d41d-4325-a068-57c4b0d61d8e)
  • Preloaded users:
    • marta@localhost/password

To deploy, run:

kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/dex/dex-deploy.yaml\n

Forward local requests to the instance of Dex running in the cluster:

kubectl port-forward deployment/dex 5556:5556 &\n
a12n-server

Authorino examples include a bundle of a12n-server and corresponding MySQL database, preloaded with the following setup:

  • Admin console: http://a12n-server:8531 (admin/123456)
  • Preloaded clients:
    • service-account-1: to obtain access tokens via client_credentials OAuth2 grant type, to consume the Talker API (Client secret: DbgXROi3uhWYCxNUq_U1ZXjGfLHOIM8X3C2bJLpeEdE); includes metadata privilege: { \"talker-api\": [\"read\"] } that can be used to write authorization policies
    • talker-api: to authenticate to the token introspect endpoint (Client secret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g)

To deploy, run:

kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n

Forward local requests to the instance of a12n-server running in the cluster:

kubectl port-forward deployment/a12n-server 8531:8531 &\n
"},{"location":"authorino/docs/contributing/#re-build-and-rollout-latest","title":"Re-build and rollout latest","text":"

Re-build and rollout latest Authorino image:

make local-rollout\n

If you made changes to the CRD between iterations, re-install by running:

make install\n
"},{"location":"authorino/docs/contributing/#clean-up","title":"Clean-up","text":"

The following command deletes the entire Kubernetes cluster started with Kind:

make local-cleanup\n
"},{"location":"authorino/docs/contributing/#sign-your-commits","title":"Sign your commits","text":"

All commits to be accepted to Authorino's code are required to be signed. Refer to this page about signing your commits.

"},{"location":"authorino/docs/contributing/#logging-policy","title":"Logging policy","text":"

A few guidelines for adding logging messages in your code:

  1. Make sure you understand Authorino's Logging architecture and policy regarding log levels, log modes, tracing IDs, etc.
  2. Respect controller-runtime's Logging Guidelines.
  3. Do not add sensitive data to your info log messages; instead, redact all sensitive data in your log messages or use debug log level by mutating the logger with V(1) before outputting the message.
"},{"location":"authorino/docs/contributing/#additional-resources","title":"Additional resources","text":"

Here in the repo:

  • Getting started
  • Terminology
  • Architecture
  • Feature description

Other repos:

  • Authorino Operator
  • Authorino examples
"},{"location":"authorino/docs/contributing/#reach-out","title":"Reach out","text":"

#kuadrant channel on kubernetes.slack.com.

"},{"location":"authorino/docs/features/","title":"Features","text":""},{"location":"authorino/docs/features/#overview","title":"Overview","text":"

We call features of Authorino the different things one can do to enforce identity verification & authentication and authorization on requests to protected services. These can be a specific identity verification method based on a supported authentication protocol, or a method to fetch additional auth metadata in request-time, etc.

Most features of Authorino relate to the different phases of the Auth Pipeline and therefore are configured in the Authorino AuthConfig. An identity verification/authentication feature usually refers to a functionality of Authorino such as the API key-based authentication, the validation of JWTs/OIDC ID tokens, and authentication based on Kubernetes TokenReviews. Analogously, OPA, pattern-matching and Kubernetes SubjectAccessReview are examples of authorization features of Authorino.

At a deeper level, a feature can also be an additional functionality within a bigger feature, usually applicable to the whole class the bigger feature belongs to. For instance, the configuration of how auth credentials are expected to be carried in the request, which is broadly available for any identity verification method. Other examples are: Identity extension and Priorities.

A full specification of all features of Authorino that can be configured in an AuthConfig can be found in the official spec of the custom resource definition.

You can also learn about Authorino features by using the kubectl explain command in a Kubernetes cluster where the Authorino CRD has been installed. E.g. kubectl explain authconfigs.spec.authentication.credentials.

"},{"location":"authorino/docs/features/#common-feature-json-paths-selector","title":"Common feature: JSON paths (selector)","text":"

Deprecated: Prefer predicate and expression, based on Common Expression Language (CEL), instead.

The first feature of Authorino to learn about is a common functionality used in the specification of many other features. JSON paths are selectors of data from the Authorization JSON used in parts of an AuthConfig for referring to dynamic values of each authorization request.

Usage examples of JSON paths are: dynamic URLs and request parameters when fetching metadata from external sources, dynamic authorization policy rules, and dynamic authorization response attributes (e.g. injected HTTP headers, Festival Wristband token claims, etc).

"},{"location":"authorino/docs/features/#syntax","title":"Syntax","text":"

The syntax to fetch data from the Authorization JSON with JSON paths is based on GJSON. Refer to GJSON Path Syntax page for more information.

"},{"location":"authorino/docs/features/#string-modifiers","title":"String modifiers","text":"

On top of GJSON, Authorino defines a few string modifiers.

Examples below provided for the following Authorization JSON:

{\n  \"context\": {\n    \"request\": {\n      \"http\": {\n        \"path\": \"/pets/123\",\n        \"headers\": {\n          \"authorization\": \"Basic amFuZTpzZWNyZXQK\" // jane:secret\n          \"baggage\": \"eyJrZXkxIjoidmFsdWUxIn0=\" // {\"key1\":\"value1\"}\n        }\n      }\n    }\n  },\n  \"auth\": {\n    \"identity\": {\n      \"username\": \"jane\",\n      \"fullname\": \"Jane Smith\",\n      \"email\": \"\\u0006jane\\u0012@petcorp.com\\n\"\n    },\n  },\n}\n

@strip Strips out any non-printable characters such as carriage return. E.g. auth.identity.email.@strip \u2192 \"jane@petcorp.com\".

@case:upper|lower Changes the case of a string. E.g. auth.identity.username.@case:upper \u2192 \"JANE\".

@replace:{\"old\":string,\"new\":string} Replaces a substring within a string. E.g. auth.identity.username.@replace:{\"old\":\"Smith\",\"new\":\"Doe\"} \u2192 \"Jane Doe\".

@extract:{\"sep\":string,\"pos\":int} Splits a string at occurrences of a separator (default: \" \") and selects the substring at the pos-th position (default: 0). E.g. context.request.path.@extract:{\"sep\":\"/\",\"pos\":2} \u2192 123.

@base64:encode|decode base64-encodes or decodes a string value. E.g. auth.identity.username.decoded.@base64:encode \u2192 \"amFuZQo=\".

In combination with @extract, @base64 can be used to extract the username in an HTTP Basic Authentication request. E.g. context.request.http.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\",\"pos\":1} \u2192 \"jane\".

"},{"location":"authorino/docs/features/#interpolation","title":"Interpolation","text":"

JSON paths can be interpolated into strings to build template-like dynamic values. E.g. \"Hello, {auth.identity.name}!\".

"},{"location":"authorino/docs/features/#common-feature-common-expression-language-cel","title":"Common feature: Common Expression Language (CEL)","text":"

Similar to JSON Paths, Authorino supports Common Expression Language (CEL) for selecting data from the Authorization JSON and representing predicates. This is a more powerful, properly typed alternative to JSON Paths, with a well-documented syntax.

String extension functions, such as split, substring, indexOf, etc, are also supported.

Use the expression field for selecting values from the Authorization JSON. The type of the selected value will be converted to a JSON-compatible equivalent. Complex types without a direct JSON equivalent may be converted to objects (e.g. google.golang.org/protobuf/types/known/timestamppb.Timestamp gets converted to { \"seconds\": Number, \"nanos\": Number })

The most common applications of expression are for building dynamic URLs and request parameters when fetching metadata from external sources, extending properties of identity objects, and dynamic authorization response attributes (e.g. injected HTTP headers, etc).

Use predicate for expressions that return a boolean value, such as in when conditions and pattern-matching authorization rules.

"},{"location":"authorino/docs/features/#identity-verification-authentication-features-authentication","title":"Identity verification & authentication features (authentication)","text":""},{"location":"authorino/docs/features/#api-key-authenticationapikey","title":"API key (authentication.apiKey)","text":"

Authorino relies on Kubernetes Secret resources to represent API keys.

To define an API key, create a Secret in the cluster containing an api_key entry that holds the value of the API key.

API key secrets must be created in the same namespace of the AuthConfig (default) or spec.authentication.apiKey.allNamespaces must be set to true (only works with cluster-wide Authorino instances).

API key secrets must be labeled with the labels that match the selectors specified in spec.authentication.apiKey.selector in the AuthConfig.

Whenever an AuthConfig is indexed, Authorino will also index all matching API key secrets. In order for Authorino to also watch events related to API key secrets individually (e.g. new Secret created, updates, deletion/revocation), Secrets must also include a label that matches Authorino's bootstrap configuration --secret-label-selector (default: authorino.kuadrant.io/managed-by=authorino). This label may or may not be present in spec.authentication.apiKey.selector in the AuthConfig without implications for the caching of the API keys when triggered by the reconciliation of the AuthConfig; however, if not present, individual changes related to the API key secret (i.e. without touching the AuthConfig) will be ignored by the reconciler.

Example. For the following AuthConfig:

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\n  namespace: authorino-system\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"api-key-users\":\n      apiKey:\n        selector:\n          matchLabels: # the key-value set used to select the matching `Secret`s; resources including these labels will be accepted as valid API keys to authenticate to this service\n            group: friends # some custom label\n        allNamespaces: true # only works with cluster-wide Authorino instances; otherwise, create the API key secrets in the same namespace of the AuthConfig\n

The following Kubernetes Secret represents a valid API key:

apiVersion: v1\nkind: Secret\nmetadata:\n  name: user-1-api-key-1\n  namespace: default\n  labels:\n    authorino.kuadrant.io/managed-by: authorino # so the Authorino controller reconciles events related to this secret\n    group: friends\nstringData:\n  api_key: <some-randomly-generated-api-key-value>\ntype: Opaque\n

The resolved identity object, added to the authorization JSON following an API key identity source evaluation, is the Kubernetes Secret resource (as JSON).

"},{"location":"authorino/docs/features/#kubernetes-tokenreview-authenticationkubernetestokenreview","title":"Kubernetes TokenReview (authentication.kubernetesTokenReview)","text":"

Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).

These tokens can be either ServiceAccount tokens such as the ones issued by kubelet as part of Kubernetes Service Account Token Volume Projection, or any valid user access tokens issued to users of the Kubernetes server API.

The list of audiences of the token must include the requested host and port of the protected API (default), or all audiences specified in the Authorino AuthConfig custom resource. For example:

For the following AuthConfig CR, the Kubernetes token must include the audience my-api.io:

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"cluster-users\":\n      kubernetesTokenReview: {}\n

Whereas for the following AuthConfig CR, the Kubernetes token audiences must include foo and bar:

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"cluster-users\":\n      kubernetesTokenReview:\n        audiences:\n        - foo\n        - bar\n

The resolved identity object added to the authorization JSON following a successful Kubernetes authentication identity evaluation is the status field of TokenReview response (see TokenReviewStatus for reference).

"},{"location":"authorino/docs/features/#jwt-verification-authenticationjwt","title":"JWT verification (authentication.jwt)","text":"

In reconciliation-time, using OpenID Connect Discovery well-known endpoint, Authorino automatically discovers and caches OpenID Connect configurations and associated JSON Web Key Sets (JWKS) for all OpenID Connect issuers declared in an AuthConfig. Then, in request-time, Authorino verifies the JSON Web Signature (JWS) and checks the time validity of signed JSON Web Tokens (JWT) supplied on each request.

Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.

The kid claim stated in the JWT header must match one of the keys cached by Authorino during OpenID Connect Discovery, therefore supporting JWK rotation.

The decoded payload of the validated JWT is appended to the authorization JSON as the resolved identity.

OpenID Connect configurations and linked JSON Web Key Sets can be configured to be automatically refreshed (pull again from the OpenID Connect Discovery well-known endpoints), by setting the authentication.jwt.ttl field (given in seconds, default: 0 \u2013 i.e. auto-refresh disabled).

For an excellent summary of the underlying concepts and standards that relate OpenID Connect and JSON Object Signing and Encryption (JOSE), see this article by Jan Rusnacko. For official specification and RFCs, see OpenID Connect Core, OpenID Connect Discovery, JSON Web Token (JWT) (RFC7519), and JSON Object Signing and Encryption (JOSE).

"},{"location":"authorino/docs/features/#oauth-20-introspection-authenticationoauth2introspection","title":"OAuth 2.0 introspection (authentication.oauth2Introspection)","text":"

For bare OAuth 2.0 implementations, Authorino can perform token introspection on the access tokens supplied in the requests to protected APIs.

Authorino does not implement any of OAuth 2.0 grants for the applications to obtain the token. However, it can verify supplied tokens with the OAuth server, including opaque tokens, as long as the server exposes the token_introspect endpoint (RFC 7662).

Developers must set the token introspection endpoint in the AuthConfig, as well as a reference to the Kubernetes secret storing the credentials of the OAuth client to be used by Authorino when requesting the introspect.

The response returned by the OAuth2 server to the token introspection request is the resolved identity appended to the authorization JSON.

"},{"location":"authorino/docs/features/#x509-client-certificate-authentication-authenticationx509","title":"X.509 client certificate authentication (authentication.x509)","text":"

Authorino can verify X.509 certificates presented by clients for authentication on the request to the protected APIs, at application level.

Trusted root Certificate Authorities (CA) are stored in Kubernetes Secrets labeled according to selectors specified in the AuthConfig, watched and indexed by Authorino. Make sure to create proper kubernetes.io/tls-typed Kubernetes Secrets, containing the public certificates of the CA stored in either a tls.crt or ca.crt entry inside the secret.

Trusted root CA secrets must be created in the same namespace of the AuthConfig (default) or spec.authentication.x509.allNamespaces must be set to true (only works with cluster-wide Authorino instances).

Client certificates must include x509 v3 extension specifying 'Client Authentication' extended key usage.

The identity object resolved out of a client x509 certificate is equal to the subject field of the certificate, and it serializes as JSON within the Authorization JSON usually as follows:

{\n    \"auth\": {\n        \"identity\": {\n            \"CommonName\": \"aisha\",\n            \"Country\": [\"PK\"],\n            \"ExtraNames\": null,\n            \"Locality\": [\"Islamabad\"],\n            \"Names\": [\n                { \"Type\": [2, 5, 4, 3], \"Value\": \"aisha\" },\n                { \"Type\": [2, 5, 4, 6], \"Value\": \"PK\" },\n                { \"Type\": [2, 5, 4, 7], \"Value\": \"Islamabad\" },\n                { \"Type\": [2, 5, 4,10], \"Value\": \"ACME Inc.\" },\n                { \"Type\": [2, 5, 4,11], \"Value\": \"Engineering\" }\n            ],\n            \"Organization\": [\"ACME Inc.\"],\n            \"OrganizationalUnit\": [\"Engineering\"],\n            \"PostalCode\": null,\n            \"Province\": null,\n            \"SerialNumber\": \"\",\n            \"StreetAddress\": null\n        }\n  }\n}\n
"},{"location":"authorino/docs/features/#plain-authenticationplain","title":"Plain (authentication.plain)","text":"

Authorino can read plain identity objects, based on authentication tokens provided and verified beforehand using other means (e.g. Envoy JWT Authentication filter, Kubernetes API server authentication), and injected into the payload to the external authorization service.

The plain identity object is retrieved from the Authorization JSON. See Common Expression Language (CEL).

This feature is particularly useful in cases where authentication/identity verification is handled before invoking the authorization service and its resolved value injected in the payload can be trusted. Examples of applications for this feature include:

  • Authentication handled in Envoy leveraging the Envoy JWT Authentication filter (decoded JWT injected as 'metadata_context')
  • Use of Authorino as Kubernetes ValidatingWebhook service (Kubernetes 'userInfo' injected in the body of the AdmissionReview request)

Example of AuthConfig to retrieve plain identity object from the Authorization JSON.

spec:\n  authentication:\n    \"pre-validated-jwt\":\n      plain:\n        expression: metadata.filter_metadata['envoy.filters.http.jwt_authn'].verified_jwt\n

If the specified JSON path does not exist in the Authorization JSON or the value is null, the identity verification will fail and, unless other identity config succeeds, Authorino will halt the Auth Pipeline with the usual 401 Unauthorized.

"},{"location":"authorino/docs/features/#anonymous-access-authenticationanonymous","title":"Anonymous access (authentication.anonymous)","text":"

Literally a no-op evaluator for the identity verification phase that returns a static identity object {\"anonymous\":true}.

It allows implementing AuthConfigs that bypass the identity verification phase of Authorino, such as to:

  • enable anonymous access to protected services (always or combined with Priorities)
  • postpone authentication in the Auth Pipeline to be resolved as part of an OPA policy

Example of AuthConfig spec that falls back to anonymous access when OIDC authentication fails, enforcing read-only access to the protected service in such cases:

spec:\n  authentication:\n    \"jwt\":\n      jwt:\n        issuerUrl: \"\u2026\"\n    \"anonymous\":\n      priority: 1 # expired oidc token, missing creds, etc. default to anonymous access\n      anonymous: {}\n  authorization:\n    \"read-only-access-if-authn-fails\":\n      when:\n\n      - predicate: has(auth.identity.anonymous) && auth.identity.anonymous\n      patternMatching:\n        patterns:\n        - predicate: request.method == 'GET'\n
"},{"location":"authorino/docs/features/#festival-wristband-authentication","title":"Festival Wristband authentication","text":"

Authorino-issued Festival Wristband tokens can be validated as any other signed JWT using Authorino's JWT verification.

The value of the issuer must be the same issuer specified in the custom resource for the protected API originally issuing wristband. Eventually, this can be the same custom resource where the wristband is configured as a valid source of identity, but not necessarily.

"},{"location":"authorino/docs/features/#extra-auth-credentials-authenticationcredentials","title":"Extra: Auth credentials (authentication.credentials)","text":"

All the identity verification methods supported by Authorino can be configured regarding the location where access tokens and credentials (i.e. authentication secrets) travel within the request.

By default, authentication secrets are expected to be supplied in the Authorization HTTP header, with the default Bearer prefix and the plain authentication secret separated by space.

The full list of supported options is exemplified below:

spec:\n  authentication:\n    \"creds-in-the-authz-header\":\n      credentials:\n        authorizationHeader:\n          prefix: JWT\n\n    \"creds-in-a-custom-header\":\n      credentials:\n        customHeader:\n          name: X-MY-CUSTOM-HEADER\n          prefix: \"\"\n\n    \"creds-in-a-query-param\":\n      credentials:\n        queryString:\n          name: my_param\n\n    \"creds-in-a-cookie-entry\":\n      credentials:\n        cookie:\n          name: cookie-key\n
"},{"location":"authorino/docs/features/#extra-identity-extension-authenticationdefaults-and-authenticationoverrides","title":"Extra: Identity extension (authentication.defaults and authentication.overrides)","text":"

Resolved identity objects can be extended with user-defined JSON properties. Values can be static or fetched from the Authorization JSON.

A typical use-case for this feature is token normalization. Say you have more than one identity source listed in your AuthConfig but each source issues an access token with a different JSON structure \u2013 e.g. two OIDC issuers that use different names for custom JWT claims of similar meaning; when two different identity verification/authentication methods are combined, such as API keys (whose identity objects are the corresponding Kubernetes Secrets) and Kubernetes tokens (whose identity objects are Kubernetes UserInfo data).

In such cases, identity extension can be used to normalize the token to always include the same set of JSON properties of interest, regardless of the source of identity that issued the original token verified by Authorino. This simplifies the writing of authorization policies and configuration of dynamic responses.

In case of extending an existing property of the identity object (replacing), the API allows to control whether to overwrite the value or not. This is particularly useful for normalizing tokens of a same identity source that nonetheless may occasionally differ in structure, such as in the case of JWT claims that sometimes may not be present but can be safely replaced with another (e.g. username or sub).

"},{"location":"authorino/docs/features/#external-auth-metadata-features-metadata","title":"External auth metadata features (metadata)","text":""},{"location":"authorino/docs/features/#http-getget-by-post-metadatahttp","title":"HTTP GET/GET-by-POST (metadata.http)","text":"

Generic HTTP adapter that sends a request to an external service. It can be used to fetch external metadata for the authorization policies (phase ii of the Authorino Auth Pipeline), or as a web hook.

The adapter allows issuing requests either by GET or POST methods; in both cases with URL and parameters defined by the user in the spec. Dynamic values fetched from the Authorization JSON can be used.

POST request parameters as well as the encoding of the content can be controlled using the bodyParameters and contentType fields of the config, respectively. The Content-Type of POST requests can be either application/x-www-form-urlencoded (default) or application/json.

Authentication of Authorino with the external metadata server can be set either via long-lived shared secret stored in a Kubernetes Secret or via OAuth2 client credentials grant. For long-lived shared secret, set the sharedSecretRef field. For OAuth2 client credentials grant, use the oauth2 option.

In both cases, the location where the secret (long-lived or OAuth2 access token) travels in the request performed to the external HTTP service can be specified in the credentials field. By default, the authentication secret is supplied in the Authorization header with the Bearer prefix.

Custom headers can be set with the headers field. Nevertheless, headers such as Content-Type and Authorization (or eventual custom header used for carrying the authentication secret, set instead via the credentials option) will be superseded by the respective values defined for the fields contentType and sharedSecretRef.

"},{"location":"authorino/docs/features/#oidc-userinfo-metadatauserinfo","title":"OIDC UserInfo (metadata.userInfo)","text":"

Online fetching of OpenID Connect (OIDC) UserInfo data (phase ii of the Authorino Auth Pipeline), associated with an OIDC identity source configured and resolved in phase (i).

Apart from possibly complementing information of the JWT, fetching OpenID Connect UserInfo in request-time can be particularly useful for remote checking the state of the session, as opposed to only verifying the JWT/JWS offline.

Implementation requires a JWT verification authentication config (spec.authentication.jwt) in the same AuthConfig, so the well-known configuration of the OpenId Connect (OIDC) issuer can be reused.

The response returned by the OIDC server to the UserInfo request is appended (as JSON) to auth.metadata in the authorization JSON.

"},{"location":"authorino/docs/features/#user-managed-access-uma-resource-registry-metadatauma","title":"User-Managed Access (UMA) resource registry (metadata.uma)","text":"

User-Managed Access (UMA) is an OAuth-based protocol for resource owners to allow other users to access their resources. Since the UMA-compliant server is expected to know about the resources, Authorino includes a client that fetches resource data from the server and adds that as metadata of the authorization payload.

This enables the implementation of resource-level Attribute-Based Access Control (ABAC) policies. Attributes of the resource fetched in a UMA flow can be, e.g., the owner of the resource, or any business-level attributes stored in the UMA-compliant server.

A UMA-compliant server is an external authorization server (e.g., Keycloak) where the protected resources are registered. It can be as well the upstream API itself, as long as it implements the UMA protocol, with initial authentication by client_credentials grant to exchange for a Protected API Token (PAT).

It's important to notice that Authorino does NOT manage resources in the UMA-compliant server. As shown in the flow above, Authorino's UMA client is only to fetch data about the requested resources. Authorino exchanges client credentials for a Protected API Token (PAT), then queries for resources whose URI match the path of the HTTP request (as passed to Authorino by the Envoy proxy) and fetches data of each matching resource.

The resources data is added as metadata of the authorization payload and passed as input for the configured authorization policies. All resources returned by the UMA-compliant server in the query by URI are passed along. They are available in the PDPs (authorization payload) as input.auth.metadata.custom-name => Array. (See The \"Auth Pipeline\" for details.)

"},{"location":"authorino/docs/features/#authorization-features-authorization","title":"Authorization features (authorization)","text":""},{"location":"authorino/docs/features/#pattern-matching-authorization-authorizationpatternmatching","title":"Pattern-matching authorization (authorization.patternMatching)","text":"

Grant/deny access based on simple pattern-matching expressions (\"patterns\") compared against values selected from the Authorization JSON.

Each expression is composed of exactly one of the following options:

  1. a predicate field - Common Expression Language (CEL) expression that evaluates to a boolean value;
  2. a tuple composed of:
  3. selector: a JSON path to fetch a value from the Authorization JSON
  4. operator: one of: eq (equals), neq (not equal); incl (includes) and excl (excludes), for arrays; and matches, for regular expressions
  5. value: a static string value to compare the value selected from the Authorization JSON with;
  6. a patternRef field \u2013 value that maps to a predefined set of { selector, operator, value } tuples stored at the top-level of the AuthConfig spec (patterns).

Rules can mix and combine literal expressions and references to expression sets (\"named patterns\") defined at the upper level of the AuthConfig spec. (See Common feature: Conditions)

spec:\n  authorization:\n    \"my-simple-json-pattern-matching-policy\":\n      patternMatching:\n        patterns: # All patterns must match for access to be granted\n\n        - predicate: auth.identity.email_verified\n        - patternRef: admin\n\n  patterns:\n    admin: # a named pattern that can be reused in other sets of rules or conditions\n\n    - selector: auth.identity.roles\n      operator: incl\n      value: admin\n
"},{"location":"authorino/docs/features/#open-policy-agent-opa-rego-policies-authorizationopa","title":"Open Policy Agent (OPA) Rego policies (authorization.opa)","text":"

You can model authorization policies in Rego language and add them as part of the protection of your APIs.

Policies can be either declared in-line in Rego language (rego) or as an HTTP endpoint where Authorino will fetch the source code of the policy in reconciliation-time (externalPolicy).

Policies pulled from external registries can be configured to be automatically refreshed (pulled again from the external registry), by setting the authorization.opa.externalPolicy.ttl field (given in seconds, default: 0 \u2013 i.e. auto-refresh disabled).

Authorino's built-in OPA module precompiles the policies during reconciliation of the AuthConfig and caches the precompiled policies for fast evaluation in runtime, where they receive the Authorization JSON as input.

An optional field allValues: boolean makes the values of all rules declared in the Rego document to be returned in the OPA output after policy evaluation. When disabled (default), only the boolean value allow is returned. Values of internal rules of the Rego document can be referenced in subsequent policies/phases of the Auth Pipeline.

"},{"location":"authorino/docs/features/#kubernetes-subjectaccessreview-authorizationkubernetessubjectaccessreview","title":"Kubernetes SubjectAccessReview (authorization.kubernetesSubjectAccessReview)","text":"

Access control enforcement based on rules defined in the Kubernetes authorization system, i.e. Role, ClusterRole, RoleBinding and ClusterRoleBinding resources of Kubernetes RBAC.

Authorino issues a SubjectAccessReview (SAR) inquiry that checks with the underlying Kubernetes server whether the user can access a particular resource, resource kind or generic URL.

It supports resource attributes authorization check (parameters defined in the AuthConfig) and non-resource attributes authorization check (HTTP endpoint inferred from the original request).

  • Resource attributes: adequate for permissions set at namespace level, defined in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb)
  • Non-resource attributes: adequate for permissions set at cluster scope, defined for protected endpoints of a generic HTTP API (URL path + verb)

Example of Kubernetes role for resource attributes authorization:

apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: pet-reader\nrules:\n\n- apiGroups: [\"pets.io\"]\n  resources: [\"pets\"]\n  verbs: [\"get\"]\n

Example of Kubernetes cluster role for non-resource attributes authorization:

apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: pet-editor\nrules:\n\n- nonResourceURLs: [\"/pets/*\"]\n  verbs: [\"put\", \"delete\"]\n

Kubernetes' authorization policy configs look like the following in an Authorino AuthConfig:

authorization:\n  \"kubernetes-rbac\":\n    kubernetesSubjectAccessReview:\n      user: # values of the parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n        expression: auth.identity.metadata.annotations.userid\n\n      groups: [] # user groups to test for.\n\n      # for resource attributes permission checks; omit it to perform a non-resource attributes SubjectAccessReview with path and method/verb assumed from the original request\n      # if included, use the resource attributes, where the values for each parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n      resourceAttributes:\n        namespace:\n          value: default\n        group:\n          value: pets.io # the api group of the protected resource to be checked for permissions for the user\n        resource:\n          value: pets # the resource kind\n        name:\n          expression: request.path.split('/')[2] # resource name \u2013 e.g., the {id} in `/pets/{id}`\n        verb:\n          expression: request.method.lowerAscii() # api operation \u2013 e.g., copying from the context to use the same http method of the request\n

user and properties of resourceAttributes can be defined from fixed values or patterns of the Authorization JSON.

An array of groups (optional) can as well be set. When defined, it will be used in the SubjectAccessReview request.

"},{"location":"authorino/docs/features/#spicedb-authorizationspicedb","title":"SpiceDB (authorization.spicedb)","text":"

Check permission requests via gRPC with an external Google Zanzibar-inspired SpiceDB server, by Authzed.

Subject, resource and permission parameters can be set to static values or read from the Authorization JSON.

spec:\n  authorization:\n    \"spicedb\":\n      spicedb:\n        endpoint: spicedb:50051\n        insecure: true # disables TLS\n        sharedSecretRef:\n          name: spicedb\n          key: token\n        subject:\n          kind:\n            value: blog/user\n          name:\n            expression: auth.identity.sub\n        resource:\n          kind:\n            value: blog/post\n          name:\n            expression: request.path.split('/')[2] # /posts/{id}\n        permission:\n          expression: request.method\n
"},{"location":"authorino/docs/features/#custom-response-features-response","title":"Custom response features (response)","text":""},{"location":"authorino/docs/features/#custom-response-forms-successful-authorization-vs-custom-denial-status","title":"Custom response forms: successful authorization vs custom denial status","text":"

The response to the external authorization request can be customized in the following fashion:

  • Successful authorization (response.success)
  • Added HTTP headers (response.success.headers)
  • Envoy Dynamic Metadata (response.success.dynamicMetadata)
  • Custom denial status
  • Unauthenticated (response.unauthenticated)
  • Unauthorized (response.unauthorized)

Successful authorization custom responses can be set based on any of the supported custom authorization methods:

  • Plain text value
  • JSON injection
  • Festival Wristband Tokens
"},{"location":"authorino/docs/features/#added-http-headers","title":"Added HTTP headers","text":"

Set custom responses as HTTP headers injected in the request post-successful authorization by specifying one of the supported methods under response.success.headers.

The name of the response config (default) or the value of the key option (if provided) will be used as the name of the header.

"},{"location":"authorino/docs/features/#envoy-dynamic-metadata","title":"Envoy Dynamic Metadata","text":"

Authorino custom response methods can also be used to propagate Envoy Dynamic Metadata. To do so, set one of the supported methods under response.success.dynamicMetadata.

The name of the response config (default) or the value of the key option (if provided) will be used as the name of the root property of the dynamic metadata content.

A custom response exported as Envoy Dynamic Metadata can be set in the Envoy route or virtual host configuration as input to a consecutive filter in the filter chain.

E.g., to read metadata emitted by the authorization service with scheme { \"auth-data\": { \"api-key-ns\": string, \"api-key-name\": string } }, as input in a rate limit configuration placed in the filter chain after the external authorization, the Envoy config may look like the following:

# Envoy config snippet to inject `user_namespace` and `username` rate limit descriptors from metadata emitted by Authorino\nrate_limits:\n\n- actions:\n  - metadata:\n      metadata_key:\n        key: \"envoy.filters.http.ext_authz\"\n        path:\n        - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n        - key: api-key-ns\n      descriptor_key: user_namespace\n  - metadata:\n      metadata_key:\n        key: \"envoy.filters.http.ext_authz\"\n        path:\n        - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n        - key: api-key-name\n      descriptor_key: username\n
"},{"location":"authorino/docs/features/#custom-denial-status-responseunauthenticated-and-responseunauthorized","title":"Custom denial status (response.unauthenticated and response.unauthorized)","text":"

By default, Authorino will inform Envoy to respond with 401 Unauthorized or 403 Forbidden respectively when the identity verification (phase i of the Auth Pipeline) or authorization (phase ii) fail. These can be customized respectively by specifying spec.response.unauthenticated and spec.response.unauthorized in the AuthConfig.

"},{"location":"authorino/docs/features/#custom-response-methods","title":"Custom response methods","text":""},{"location":"authorino/docs/features/#plain-text-responsesuccessheadersdynamicmetadataplain","title":"Plain text (response.success.<headers|dynamicMetadata>.plain)","text":"

Simpler, yet more generalized form, for extending the authorization response for header mutation and Envoy Dynamic Metadata, based on plain text values.

The value can be static:

response:\n  success:\n    headers:\n      \"x-auth-service\":\n        plain:\n          value: Authorino\n

or fetched dynamically from the Authorization JSON (which includes support for interpolation):

response:\n  success:\n    headers:\n      \"x-username\":\n        plain:\n          expression: auth.identity.username\n
"},{"location":"authorino/docs/features/#json-injection-responsesuccessheadersdynamicmetadatajson","title":"JSON injection (response.success.<headers|dynamicMetadata>.json)","text":"

User-defined dynamic JSON objects generated by Authorino in the response phase, from static or dynamic data of the auth pipeline, and passed back to the external authorization client within added HTTP headers or Dynamic Metadata.

The following Authorino AuthConfig custom resource is an example that defines 3 dynamic JSON response items, where two items are returned to the client, stringified, in added HTTP headers, and the third as Envoy Dynamic Metadata. Envoy proxy can be configured to propagate the dynamic metadata emitted by Authorino into another filter \u2013 e.g. the rate limit filter.

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  namespace: my-namespace\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"edge\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino.kuadrant.io/managed-by: authorino\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      headers:\n        \"x-my-custom-header\":\n          json:\n            properties:\n              \"prop1\":\n                value: value1\n              \"prop2\":\n                expression: some.path.within.auth.json\n        \"x-ext-auth-other-json\":\n          json:\n            properties:\n              \"propX\":\n                value: valueX\n\n      dynamicMetadata:\n        \"auth-data\":\n          json:\n            properties:\n              \"api-key-ns\":\n                expression: auth.identity.metadata.namespace\n              \"api-key-name\":\n                expression: auth.identity.metadata.name\n
"},{"location":"authorino/docs/features/#festival-wristband-tokens-responsesuccessheadersdynamicmetadatawristband","title":"Festival Wristband tokens (response.success.<headers|dynamicMetadata>.wristband)","text":"

Festival Wristbands are signed OpenID Connect JSON Web Tokens (JWTs) issued by Authorino at the end of the auth pipeline and passed back to the client, typically in added HTTP response header. It is an opt-in feature that can be used to implement Edge Authentication Architecture (EAA) and enable token normalization. Authorino wristbands include minimal standard JWT claims such as iss, iat, and exp, and optional user-defined custom claims, whose values can be static or dynamically fetched from the authorization JSON.

The Authorino AuthConfig custom resource below sets an API protection that issues a wristband after a successful authentication via API key. Apart from standard JWT claims, the wristband contains 2 custom claims: a static value aud=internal and a dynamic value born that fetches from the authorization JSON the date/time of creation of the secret that represents the API key used to authenticate.

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  namespace: my-namespace\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"edge\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino.kuadrant.io/managed-by: authorino\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      headers:\n        \"x-wristband\":\n          wristband:\n            issuer: https://authorino-oidc.default.svc:8083/my-namespace/my-api-protection/x-wristband\n            customClaims:\n              \"aud\":\n                value: internal\n              \"age\":\n                expression: int(request.time.seconds) - (timestamp(auth.identity.metadata.creationTimestamp) - timestamp(\"1970-01-01T00:00:00Z\")).getSeconds()\n            tokenDuration: 300\n            signingKeyRefs:\n            - name: my-signing-key\n              algorithm: ES256\n            - name: my-old-signing-key\n              algorithm: RS256\n

The signing key names listed in signingKeyRefs must match the names of Kubernetes Secret resources created in the same namespace, where each secret contains a key.pem entry that holds the value of the private key that will be used to sign the wristbands issued, formatted as PEM. The first key in this list will be used to sign the wristbands, while the others are kept to support key rotation.

For each protected API configured for the Festival Wristband issuing, Authorino exposes the following OpenID Connect Discovery well-known endpoints (available for requests within the cluster):

  • OpenID Connect configuration: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-configuration
  • JSON Web Key Set (JWKS) well-known endpoint: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-connect/certs
"},{"location":"authorino/docs/features/#callbacks-callbacks","title":"Callbacks (callbacks)","text":""},{"location":"authorino/docs/features/#http-endpoints-callbackshttp","title":"HTTP endpoints (callbacks.http)","text":"

Sends requests to specified HTTP endpoints at the end of the auth pipeline.

The scheme of the http field is the same as of metadata.http.

Example:

spec:\n  authentication: [\u2026]\n  authorization: [\u2026]\n\n  callbacks:\n    \"log\":\n      http:\n        url: http://logsys\n        method: POST\n        body:\n          expression: |\n            { \"requestId\": request.id, \"username\": auth.identity.username, \"authorizationResult\": auth.authorization }\n    \"important-forbidden\":\n      when:\n\n      - predicate: \"!auth.authorization.important-policy\"\n      http:\n        urlExpression: |\n          \"http://monitoring/important?forbidden-user=\" + auth.identity.username\n
"},{"location":"authorino/docs/features/#common-feature-priorities","title":"Common feature: Priorities","text":"

Priorities allow setting the sequence of execution for blocks of concurrent evaluators within phases of the Auth Pipeline.

Evaluators of the same priority execute concurrently with each other \"in a block\". After syncing that block (i.e. after all evaluators of the block have returned), the next block of evaluator configs of consecutive priority is triggered.

Use cases for priorities are:

  1. Saving expensive tasks to be triggered when there's a high chance of returning immediately after finishing executing a less expensive one \u2013 e.g.
    • an identity config that calls an external IdP to verify a token that is rarely used, compared to verifying JWTs preferred by most users of the service;
    • an authorization policy that performs some quick checks first, such as verifying allowed paths, and only if it passes, moves to the evaluation of a more expensive policy.
  2. Establishing dependencies between evaluators - e.g.
    • an external metadata request that needs to wait until a previous metadata responds first (in order to use data from the response)

Priorities can be set using the priority property available in all evaluator configs of all phases of the Auth Pipeline (identity, metadata, authorization and response). The lower the number, the higher the priority. By default, all evaluators have priority 0 (i.e. highest priority).

Consider the following example to understand how priorities work:

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api\n  authentication:\n    \"tier-1\":\n      priority: 0\n      apiKey:\n        selector:\n          matchLabels:\n            tier: \"1\"\n    \"tier-2\":\n      priority: 1\n      apiKey:\n        selector:\n          matchLabels:\n            tier: \"2\"\n    \"tier-3\":\n      priority: 1\n      apiKey:\n        selector:\n          matchLabels:\n            tier: \"3\"\n  metadata:\n    \"first\":\n      http:\n        url: http://talker-api:3000\n    \"second\":\n      priority: 1\n      http:\n        url: http://talker-api:3000/first_uuid={auth.metadata.first.uuid}\n  authorization:\n    \"allowed-endpoints\":\n      when:\n      - predicate: |\n          !(request.path in ['/hi', '/hello', '/aloha', '/ciao'])\n      patternMatching:\n        patterns:\n        - pattern: \"true\"\n    \"more-expensive-policy\": # no point in evaluating this one if it's not an allowed endpoint\n      priority: 1\n      opa:\n        rego: |\n          allow { true }\n  response:\n    success:\n      headers:\n        \"x-auth-data\":\n          json:\n            properties:\n              \"tier\":\n                expression: auth.identity.metadata.labels.tier\n              \"first-uuid\":\n                expression: auth.metadata.first.uuid\n              \"second-uuid\":\n                expression: auth.metadata.second.uuid\n              \"second-path\":\n                expression: auth.metadata.second.path\n

For the AuthConfig above,

  • Identity configs tier-2 and tier-3 (priority 1) will only trigger (concurrently) in case tier-1 (priority 0) fails to validate the authentication token first. (This behavior happens without prejudice of context canceling between concurrent evaluators \u2013 i.e. evaluators that are triggered concurrently to another, such as tier-2 and tier-3, continue to cancel the context of each other if any of them succeeds validating the token first.)

  • Metadata source second (priority 1) uses the response of the request issued by metadata source first (priority 0), so it will wait for first to finish by triggering only in the second block.

  • Authorization policy allowed-endpoints (priority 0) is considered to be a lot less expensive than more-expensive-policy (priority 1) and has a high chance of denying access to the protected service (if the path is not one of the allowed endpoints). By setting different priorities to these policies we ensure the more expensive policy is triggered in sequence after the less expensive one, instead of concurrently.

"},{"location":"authorino/docs/features/#common-feature-conditions-when","title":"Common feature: Conditions (when)","text":"

Conditions, identified by the when field in the AuthConfig API, are logical expressions (\"predicates\") that can be used to condition the evaluation of a particular auth rule, as well as of the AuthConfig altogether (\"top-level conditions\").

The predicates are evaluated against the Authorization JSON, where each predicate is composed of exactly one of the following options:

  1. a predicate field \u2013 CEL expression that evaluates to a boolean value;
  2. a tuple composed of:
    • selector: a JSON path to fetch a value from the Authorization JSON
    • operator: one of: eq (equals); neq (not equal); incl (includes) and excl (excludes), for when the value fetched from the Authorization JSON is expected to be an array; matches, for regular expressions
    • value: a static string value to compare the value selected from the Authorization JSON with;
  3. a patternRef field \u2013 value that maps to a predefined set of { selector, operator, value } tuples stored at the top-level of the AuthConfig spec (patterns).

An expression contains one or more patterns and they must either all evaluate to true (\"AND\" operator, declared by grouping the patterns within an all block) or at least one of the patterns must be true (\"OR\" operator, when grouped within an any block.) Patterns not explicitly grouped are AND'ed by default.

Examples of when conditions

i) to skip an entire AuthConfig based on the context (AND operator assumed by default):

spec:\n  when: # auth enforced only on requests to POST /resources/*\n\n  - predicate: request.method == 'POST' && request.path.matches(\"^/resources/.*\")\n

ii) equivalent to the above using { selector, operator, value } tuples and an explicit AND operator (all):

spec:\n  when: # auth enforced only on requests to POST /resources/*\n\n  - all:\n    - selector: request.method\n      operator: eq\n      value: POST\n    - selector: request.path\n      operator: matches\n      value: ^/resources/.*\n

iii) OR condition (any) using { selector, operator, value } tuples:

spec:\n  when: # auth enforced only on requests with HTTP method equals to POST or PUT\n\n  - any:\n    - selector: request.method\n      operator: eq\n      value: POST\n    - selector: request.method\n      operator: eq\n      value: PUT\n

iv) complex expression with nested operations using { selector, operator, value } tuples:

spec:\n  when: # auth enforced only on requests to POST /resources/* or PUT /resources/*\n\n  - any:\n    - all:\n      - selector: request.method\n        operator: eq\n        value: POST\n      - selector: request.path\n        operator: matches\n        value: ^/resources/.*\n    - all:\n      - selector: request.method\n        operator: eq\n        value: PUT\n      - selector: request.path\n        operator: matches\n        value: ^/resources/.*\n

v) more concise equivalent of the above using CEL:

spec:\n  when: # auth enforced only on requests to /resources/* path with method equals to POST or PUT\n\n  - predicate: request.path.matches(\"^/resources/.*\") && request.method in ['POST', 'PUT']\n

vi) to skip part of an AuthConfig (i.e., a specific auth rule):

spec:\n  metadata:\n    \"metadata-source\":\n      http:\n        url: https://my-metadata-source.io\n      when: # only fetch the external metadata if the context is HTTP method other than OPTIONS\n\n      - predicate: request.method != 'OPTIONS'\n

vii) skipping part of an AuthConfig will not affect other auth rules:

spec:\n  authentication:\n    \"authn-meth-1\":\n      apiKey: {\u2026} # this auth rule only triggers for POST requests to /foo[/*]\n      when:\n\n      - predicate: request.method == 'POST' && request.path.matches(\"^/foo(/.*)?$\")\n\n    \"authn-meth-2\": # this auth rule triggered regardless\n      jwt: {\u2026}\n

viii) concrete use-case: evaluating only the necessary identity checks based on the user's indication of the preferred authentication method (prefix of the value supplied in the HTTP Authorization request header):

spec:\n  authentication:\n    \"jwt\":\n      when:\n\n      - predicate: request.headers['authorization'].startsWith('JWT')\n      jwt: {\u2026}\n\n    \"api-key\":\n      when:\n\n      - predicate: request.headers['authorization'].startsWith('APIKEY')\n      apiKey: {\u2026}\n

ix) to avoid repetition while defining patterns for conditions:

spec:\n  patterns:\n    a-pet: # a named pattern that can be reused in sets of conditions\n\n    - selector: context.request.http.path\n      operator: matches\n      value: ^/pets/\\d+(/.*)$\n\n  metadata:\n    \"pets-info\":\n      when:\n\n      - patternRef: a-pet\n      http:\n        urlExpression: |\n          \"https://pets-info.io?petId=\" + request.path.split('/')[2]\n\n  authorization:\n    \"pets-owners-only\":\n      when:\n\n      - patternRef: a-pet\n      opa:\n        rego: |\n          allow { input.metadata[\"pets-info\"].ownerid == input.auth.identity.userid }\n

x) combining literals and refs \u2013 concrete case: authentication required for selected operations:

spec:\n  patterns:\n    api-base-path:\n\n    - selector: request.path\n      operator: matches\n      value: ^/api/.*\n\n    authenticated-user:\n\n    - selector: auth.identity.anonymous\n      operator: neq\n      value: \"true\"\n\n  authentication:\n    api-users: # tries to authenticate all requests to path /api/*\n      when:\n\n      - patternRef: api-base-path\n      jwt: {\u2026}\n\n    others: # defaults to anonymous access when authentication fails or not /api/* path\n      anonymous: {}\n      priority: 1\n\n  authorization:\n    api-write-access-requires-authentication: # POST/PUT/DELETE requests to /api/* path cannot be anonymous\n      when:\n\n      - patternRef: api-base-path\n      - predicate: request.method in ['POST', 'PUT', 'DELETE']\n      patternMatching:\n        patterns:\n        - patternRef: authenticated-user\n\n  response: # bonus: export user data if available\n    success:\n      dynamicMetadata:\n        \"user-data\":\n          when:\n\n          - patternRef: authenticated-user\n          json:\n            properties:\n              jwt-claims:\n                expression: auth.identity\n
"},{"location":"authorino/docs/features/#common-feature-caching-cache","title":"Common feature: Caching (cache)","text":"

Objects resolved at runtime in an Auth Pipeline can be cached \"in-memory\", and avoided being evaluated again at a subsequent request, until it expires. A lookup cache key and a TTL can be set individually for any evaluator config in an AuthConfig.

Each cache config induces a completely independent cache table (or \"cache namespace\"). Consequently, different evaluator configs can use the same cache key and there will be no collision between entries from different evaluators.

E.g.:

spec:\n  hosts:\n\n  - my-api.io\n\n  authentication: [\u2026]\n\n  metadata:\n    \"external-metadata\":\n      http:\n        urlExpression: |\n          \"http://my-external-source?search=\" + request.path\n      cache:\n        key:\n          expression: request.path\n        ttl: 300\n\n  authorization:\n    \"complex-policy\":\n      opa:\n        externalPolicy:\n          url: http://my-policy-registry\n      cache:\n        key:\n          expression: auth.identity.group + '-' + request.method + '-' + request.path\n        ttl: 60\n

The example above sets caching for the 'external-metadata' metadata config and for the 'complex-policy' authorization policy. In the case of 'external-metadata', the cache key is the path of the original HTTP request being authorized by Authorino (fetched dynamically from the Authorization JSON); i.e., after obtaining a metadata object from the external source for a given contextual HTTP path one first time, whenever that same HTTP path repeats in a subsequent request, Authorino will use the cached object instead of sending a request again to the external source of metadata. After 5 minutes (300 seconds), the cache entry will expire and Authorino will fetch again from the source if requested.

As for the 'complex-policy' authorization policy, the cache key is a string composed of the 'group' the identity belongs to, the method of the HTTP request and the path of the HTTP request. Whenever these repeat, Authorino will use the result of the policy that was evaluated and cached priorly. Cache entries in this namespace expire after 60 seconds.

Notes on evaluator caching

Capacity - By default, each cache namespace is limited to 1 mb. Entries will be evicted following First-In-First-Out (FIFO) policy to release space. The individual capacity of cache namespaces is set at the level of the Authorino instance (via --evaluator-cache-size command-line flag or spec.evaluatorCacheSize field of the Authorino CR).

Usage - Avoid caching objects whose evaluation is considered to be relatively cheap. Examples of operations associated to Authorino auth features that are usually NOT worth caching: validation of JSON Web Tokens (JWT), Kubernetes TokenReviews and SubjectAccessReviews, API key validation, simple JSON pattern-matching authorization rules, simple OPA policies. Examples of operations where caching may be desired: OAuth2 token introspection, fetching of metadata from external sources (via HTTP request), complex OPA policies.

"},{"location":"authorino/docs/features/#common-feature-metrics-metrics","title":"Common feature: Metrics (metrics)","text":"

By default, Authorino will only export metrics down to the level of the AuthConfig. Deeper metrics at the level of each evaluator within an AuthConfig can be activated by setting the common field metrics: true of the evaluator config.

E.g.:

apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-authconfig\n  namespace: my-ns\nspec:\n  metadata:\n    \"my-external-metadata\":\n      http:\n        url: http://my-external-source?search={request.path}\n      metrics: true\n

The above will enable the metrics auth_server_evaluator_duration_seconds (histogram) and auth_server_evaluator_total (counter) with labels namespace=\"my-ns\", authconfig=\"my-authconfig\", evaluator_type=\"METADATA_GENERIC_HTTP\" and evaluator_name=\"my-external-metadata\".

The same pattern works for other types of evaluators. Find below the list of all types and corresponding label constant used in the metric:

Evaluator type Metric's evaluator_type label authentication.apiKey IDENTITY_APIKEY authentication.kubernetesTokenReview IDENTITY_KUBERNETES authentication.jwt IDENTITY_OIDC authentication.oauth2Introspection IDENTITY_OAUTH2 authentication.x509 IDENTITY_MTLS authentication.plain IDENTITY_PLAIN authentication.anonymous IDENTITY_NOOP metadata.http METADATA_GENERIC_HTTP metadata.userInfo METADATA_USERINFO metadata.uma METADATA_UMA authorization.patternMatching AUTHORIZATION_JSON authorization.opa AUTHORIZATION_OPA authorization.kubernetesSubjectAccessReview AUTHORIZATION_KUBERNETES authorization.spicedb AUTHORIZATION_AUTHZED response.success..plain RESPONSE_PLAIN response.success..json RESPONSE_JSON response.success..wristband RESPONSE_WRISTBAND

Metrics at the level of the evaluators can also be enforced to an entire Authorino instance, by setting the --deep-metrics-enabled command-line flag. In this case, regardless of the value of the field spec.(authentication|metadata|authorization|response).metrics in the AuthConfigs, individual metrics for all evaluators of all AuthConfigs will be exported.

For more information about metrics exported by Authorino, see Observability.

"},{"location":"authorino/docs/getting-started/","title":"Getting started","text":"

This page covers requirements and instructions to deploy Authorino on a Kubernetes cluster, as well as the steps to declare, apply and try out a protection layer of authentication and authorization over your service, clean-up and complete uninstallation.

If you prefer learning with an example, check out our Hello World.

"},{"location":"authorino/docs/getting-started/#requirements","title":"Requirements","text":""},{"location":"authorino/docs/getting-started/#platform-requirements","title":"Platform requirements","text":"

These are the platform requirements to use Authorino:

  • Kubernetes server (recommended v1.21 or later), with permission to create Kubernetes Custom Resource Definitions (CRDs) (for bootstrapping Authorino and Authorino Operator)

    Alternative: K8s distros and platforms

    Alternatively to upstream Kubernetes, you should be able to use any other Kubernetes distribution or Kubernetes Management Platform (KMP) with support for Kubernetes Custom Resources Definitions (CRD) and custom controllers, such as Red Hat OpenShift, IBM Cloud Kubernetes Service (IKS), Google Kubernetes Engine (GKE), Amazon Elastic Kubernetes Service (EKS) and Azure Kubernetes Service (AKS).

  • Envoy proxy (recommended v1.19 or later), to wire up Upstream services (i.e. the services to be protected with Authorino) and external authorization filter (Authorino) for integrations based on the reverse-proxy architecture - example

    Alternative: Non-reverse-proxy integration

    Technically, any client that implements Envoy's external authorization gRPC protocol should be compatible with Authorino. For integrations based on the reverse-proxy architecture nevertheless, we strongly recommend that you leverage Envoy alongside Authorino.

    "},{"location":"authorino/docs/getting-started/#feature-specific-requirements","title":"Feature-specific requirements","text":"

    A few examples are:

    • For OpenID Connect, make sure you have access to an identity provider (IdP) and an authority that can issue ID tokens (JWTs). Check out Keycloak which can solve both and connect to external identity sources and user federation like LDAP.

    • For Kubernetes authentication tokens, platform support for the TokenReview and SubjectAccessReview APIs of Kubernetes shall be required. In case you want to be able to request access tokens for clients running outside the cluster, you may also want to check out the requisites for using Kubernetes TokenRequest API (GA in v1.20).

    • For User-Managed Access (UMA) resource data, you will need a UMA-compliant server running as well. This can be an implementation of the UMA protocol by each upstream API itself or (more typically) an external server that knows about the resources. Again, Keycloak can be a good fit here as well. Just keep in mind that, whatever resource server you choose, changing-state actions commanded in the upstream APIs or other parties will have to be reflected in the resource server. Authorino will not do that for you.

    Check out the Feature specification page for more feature-specific requirements.

    "},{"location":"authorino/docs/getting-started/#installation","title":"Installation","text":""},{"location":"authorino/docs/getting-started/#step-install-the-authorino-operator","title":"Step: Install the Authorino Operator","text":"

    The simplest way to install the Authorino Operator is by applying the manifest bundle:

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n

    The above will install the latest build of the Authorino Operator and latest version of the manifests (CRDs and RBAC), which by default points as well to the latest build of Authorino, both based on the main branches of each component. To install a stable released version of the Operator and therefore also defaults to its latest compatible stable release of Authorino, replace main with another tag of a proper release of the Operator, e.g. 'v0.2.0'.

    This step will also install cert-manager in the cluster (required).

    Alternatively, you can deploy the Authorino Operator using the Operator Lifecycle Manager bundles. For instructions, check out Installing via OLM.

    "},{"location":"authorino/docs/getting-started/#step-request-an-authorino-instance","title":"Step: Request an Authorino instance","text":"

    Choose either cluster-wide or namespaced deployment mode and whether you want TLS termination enabled for the Authorino endpoints (gRPC authorization, raw HTTP authorization, and OIDC Festival Wristband Discovery listeners), and follow the corresponding instructions below.

    The instructions here are for centralized gateway or centralized authorization service architecture. Check out the Topologies section of the docs for alternatively running Authorino in a sidecar container.

    Cluster-wide (with TLS)

    Create the namespace:

    kubectl create namespace authorino\n

    Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secrets in the namespace):

    curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n

    Deploy Authorino:

    kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  replicas: 1\n  clusterWide: true\n  listener:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n

    Cluster-wide (without TLS)
    kubectl create namespace authorino\nkubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  image: quay.io/kuadrant/authorino:latest\n  replicas: 1\n  clusterWide: true\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    Namespaced (with TLS)

    Create the namespace:

    kubectl create namespace myapp\n

    Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secrets in the namespace):

    curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/myapp/g\" | kubectl -n myapp apply -f -\n

    Deploy Authorino:

    kubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  image: quay.io/kuadrant/authorino:latest\n  replicas: 1\n  clusterWide: false\n  listener:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n

    Namespaced (without TLS)
    kubectl create namespace myapp\nkubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  image: quay.io/kuadrant/authorino:latest\n  replicas: 1\n  clusterWide: false\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/getting-started/#protect-a-service","title":"Protect a service","text":"

    The most typical integration to protect services with Authorino is by putting the service (upstream) behind a reverse-proxy or API gateway, enabled with an authorization filter that ensures all requests to the service are first checked with the authorization server (Authorino).

    To do that, make sure you have your upstream service deployed and running, usually in the same Kubernetes server where you installed Authorino. Then, setup an Envoy proxy and create an Authorino AuthConfig for your service.

    Authorino exposes 2 interfaces to serve the authorization requests:

    • a gRPC interface that implements Envoy's External Authorization protocol;
    • a raw HTTP authorization interface, suitable for using Authorino with Kubernetes ValidatingWebhook, for Envoy external authorization via HTTP, and other integrations (e.g. other proxies).

    To use Authorino as a simple satellite (sidecar) Policy Decision Point (PDP), applications can integrate directly via any of these interfaces. By integrating via a proxy or API gateway, the combination makes Authorino to perform as an external Policy Enforcement Point (PEP) completely decoupled from the application.

    "},{"location":"authorino/docs/getting-started/#life-cycle","title":"Life cycle","text":""},{"location":"authorino/docs/getting-started/#step-setup-envoy","title":"Step: Setup Envoy","text":"

    To configure Envoy for proxying requests targeting the upstream service and authorizing with Authorino, setup an Envoy configuration that enables Envoy's external authorization HTTP filter. Store the configuration in a ConfigMap.

    These are the important bits in the Envoy configuration to activate Authorino:

    static_resources:\n  listeners:\n\n  - address: {\u2026} # TCP socket address and port of the proxy\n    filter_chains:\n    - filters:\n      - name: envoy.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          route_config: {\u2026} # routing configs - virtual host domain and endpoint matching patterns and corresponding upstream services to redirect the traffic\n          http_filters:\n          - name: envoy.filters.http.ext_authz # the external authorization filter\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n              transport_api_version: V3\n              failure_mode_allow: false # ensures only authenticated and authorized traffic goes through\n              grpc_service:\n                envoy_grpc:\n                  cluster_name: authorino\n                timeout: 1s\n  clusters:\n  - name: authorino\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: authorino\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: authorino-authorino-authorization # name of the Authorino service deployed \u2013 it can be the fully qualified name with `.<namespace>.svc.cluster.local` suffix (e.g. 
`authorino-authorino-authorization.myapp.svc.cluster.local`)\n                port_value: 50051\n    transport_socket: # in case TLS termination is enabled in Authorino; omit it otherwise\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        common_tls_context:\n          validation_context:\n            trusted_ca:\n              filename: /etc/ssl/certs/authorino-ca-cert.crt\n

    For a complete Envoy ConfigMap containing an upstream API protected with Authorino, with TLS enabled and option for rate limiting with Limitador, plus a webapp served under the same domain as the protected API, check out this example.

    After creating the ConfigMap with the Envoy configuration, create an Envoy Deployment and Service. E.g.:

    kubectl -n myapp apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: envoy\n  labels:\n    app: envoy\nspec:\n  selector:\n    matchLabels:\n      app: envoy\n  template:\n    metadata:\n      labels:\n        app: envoy\n    spec:\n      containers:\n\n        - name: envoy\n          image: envoyproxy/envoy:v1.19-latest\n          command: [\"/usr/local/bin/envoy\"]\n          args:\n            - --config-path /usr/local/etc/envoy/envoy.yaml\n            - --service-cluster front-proxy\n            - --log-level info\n            - --component-log-level filter:trace,http:debug,router:debug\n          ports:\n            - name: web\n              containerPort: 8000 # matches the address of the listener in the envoy config\n          volumeMounts:\n            - name: config\n              mountPath: /usr/local/etc/envoy\n              readOnly: true\n            - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n              subPath: ca.crt\n              mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n              readOnly: true\n      volumes:\n        - name: config\n          configMap:\n            name: envoy\n            items:\n              - key: envoy.yaml\n                path: envoy.yaml\n        - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n          secret:\n            defaultMode: 420\n            secretName: authorino-ca-cert\n  replicas: 1\nEOF\n
    kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Service\nmetadata:\n  name: envoy\nspec:\n  selector:\n    app: envoy\n  ports:\n\n    - name: web\n      port: 8000\n      protocol: TCP\nEOF\n
    "},{"location":"authorino/docs/getting-started/#step-apply-an-authconfig","title":"Step: Apply an AuthConfig","text":"

    Check out the docs for a full description of Authorino's AuthConfig Custom Resource Definition (CRD) and its features.

    For examples based on specific use-cases, check out the User guides.

    For authentication based on OpenID Connect (OIDC) JSON Web Tokens (JWT), plus one simple JWT claim authorization check, a typical AuthConfig custom resource looks like the following:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n  hosts: # any hosts that resolve to the envoy service and envoy routing config where the external authorization filter is enabled\n\n  - my-api.io # north-south traffic through a Kubernetes `Ingress` or OpenShift `Route`\n  - my-api.myapp.svc.cluster.local # east-west traffic (between applications within the cluster)\n  authentication:\n    \"idp-users\":\n      jwt:\n        issuerUrl: https://my-idp.com/auth/realm\n  authorization:\n    \"check-claim\":\n      patternMatching:\n        patterns:\n        - selector: auth.identity.group\n          operator: eq\n          value: allowed-users\nEOF\n

    After applying the AuthConfig, consumers of the protected service should be able to start sending requests.

    "},{"location":"authorino/docs/getting-started/#clean-up","title":"Clean-up","text":""},{"location":"authorino/docs/getting-started/#remove-protection","title":"Remove protection","text":"

    Delete the AuthConfig:

    kubectl -n myapp delete authconfig/my-api-protection\n

    Decommission the Authorino instance:

    kubectl -n myapp delete authorino/authorino\n
    "},{"location":"authorino/docs/getting-started/#uninstall","title":"Uninstall","text":"

    To completely remove Authorino CRDs, run from the Authorino Operator directory:

    make uninstall\n
    "},{"location":"authorino/docs/getting-started/#next-steps","title":"Next steps","text":"
    1. Read the docs. The Architecture page and the Features page are good starting points to learn more about how Authorino works and its functionalities.
    2. Check out the User guides for several examples of AuthConfigs based on specific use-cases
    "},{"location":"authorino/docs/terminology/","title":"Terminology","text":"

    Here we define some terms that are used in the project, with the goal of avoiding confusion and facilitating more accurate conversations related to Authorino.

    If you see terms used that are not here (or are used in place of terms here) please consider contributing a definition to this doc with a PR, or modifying the use elsewhere to align with these terms.

    "},{"location":"authorino/docs/terminology/#terms","title":"Terms","text":"

    Access token Type of temporary password (security token), tied to an authenticated identity, issued by an auth server upon request from either the identity subject itself or a registered auth client known by the auth server, and that delegates to a party powers to operate on behalf of that identity before a resource server; it can be formatted as an opaque data string or as an encoded JSON Web Token (JWT).

    Application Programming Interface (API) Interface that defines interactions between multiple software applications; (in HTTP communication) set of endpoints and specification to expose resources hosted by a resource server, to be consumed by client applications; the access facade of a resource server.

    Attribute-based Access Control (ABAC) Authorization model that grants/denies access to resources based on evaluation of authorization policies which combine attributes together (from claims, from the request, from the resource, etc).

    Auth Usually employed as a short for authentication and authorization together (AuthN/AuthZ).

    Auth client Application client (software) that uses an auth server, either in the process of authenticating and/or authorizing identity subjects (including self) who want to consume resources from a resources server or auth server.

    Auth server Server where auth clients, users, roles, scopes, resources, policies and permissions can be stored and managed.

    Authentication (AuthN) Process of verifying that a given credential belongs to a claimed-to-be identity; usually resulting in the issuing of an access token.

    Authorization (AuthZ) Process of granting (or denying) access over a resource to a party based on the set of authorization rules, policies and/or permissions enforced.

    Authorization header HTTP request header frequently used to carry credentials to authenticate a user in an HTTP communication, like in requests sent to an API; alternatives usually include credentials carried in another (custom) HTTP header, query string parameter or HTTP cookie.

    Capability Usually employed to refer to a management feature of a Kubernetes-native system, based on the definition and use of Kubernetes Custom Resources (CRDs and CRs), that enables that system to one of the following \u201ccapability levels\u201d: Basic Install, Seamless Upgrades, Full Lifecycle, Deep Insights, Auto Pilot.

    Claim Attribute packed in a security token which represents a claim that one who bears the token is making about an entity, usually an identity subject.

    Client ID Unique identifier of an auth client within an auth server domain (or auth server realm).

    Client secret Password presented by auth clients together with their Client IDs while authenticating with an auth server, either when requesting access tokens to be issued or when consuming services from the auth servers in general.

    Delegation Process of granting a party (usually an auth client) with powers to act, often with limited scope, on behalf of an identity, to access resources from a resource server. See also OAuth2.

    Hash-based Message Authentication Code (HMAC) Specific type of message authentication code (MAC) that involves a cryptographic hash function and a shared secret cryptographic key; it can be used to verify the authenticity of a message and therefore as an authentication method.

    Identity Set of properties that qualifies a subject as a strong identifiable entity (usually a user), who can be authenticated by an auth server. See also Claims.

    Identity and Access Management (IAM) system Auth system that implements and/or connects with sources of identity (IdP) and offers interfaces for managing access (authorization policies and permissions). See also Auth server.

    Identity Provider (IdP) Source of identity; it can be a feature of an auth server or external source connected to an auth server.

    ID token Special type of access token; an encoded JSON Web Token (JWT) that packs claims about an identity.

    JSON Web Token (JWT) JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties.

    JSON Web Signature (JWS) Standard for signing arbitrary data, especially JSON Web Tokens (JWT).

    JSON Web Key Set (JWKS) Set of keys containing the public keys used to verify any JSON Web Token (JWT).

    Keycloak Open source auth server to allow single sign-on with identity and access management.

    Lightweight Directory Access Protocol (LDAP) Open standard for distributed directory information services for sharing of information about users, systems, networks, services and applications.

    Mutual Transport Layer Security (mTLS) Protocol for the mutual authentication of client-server communication, i.e., the client authenticates the server and the server authenticates the client, based on the acceptance of the X.509 certificates of each party.

    OAuth 2.0 (OAuth2) Industry-standard protocol for delegation.

    OpenID Connect (OIDC) Simple identity verification (authentication) layer built on top of the OAuth2 protocol.

    Open Policy Agent (OPA) Authorization policy agent that enables the usage of declarative authorization policies written in Rego language.

    Opaque token Security token devoid of explicit meaning (e.g. random string); it requires the use of a lookup mechanism to be translated into a meaningful set of claims representing an identity.

    Permission Association between a protected resource and the authorization policies that must be evaluated to determine whether access should be granted; e.g. <user|group|role> CAN DO <action> ON RESOURCE <X>.

    Policy Rule or condition (authorization policy) that must be satisfied to grant access to a resource; strongly related to the different access control mechanisms (ACMs) and strategies one can use to protect resources, e.g. attribute-based access control (ABAC), role-based access control (RBAC), context-based access control, user-based access control (UBAC).

    Policy Administration Point (PAP) Set of UIs and APIs to manage resources servers, resources, scopes, policies and permissions; it is where the auth system is configured.

    Policy Decision Point (PDP) Where the authorization requests are sent, with permissions being requested, and authorization policies are evaluated accordingly.

    Policy Enforcement Point (PEP) Where the authorization is effectively enforced, usually at the resource server or at a proxy, based on a response provided by the Policy Decision Point (PDP).

    Policy storage Where policies are stored and from where they can be fetched, perhaps to be cached.

    Red Hat SSO Auth server; downstream product created from the Keycloak Open Source project.

    Refresh token Special type of security token, often provided together with an access token in an OAuth2 flow, used to renew the duration of an access token before it expires; it requires client authentication.

    Request Party Token (RPT) JSON Web Token (JWT) digitally signed using JSON Web Signature (JWS), issued by the Keycloak auth server.

    Resource One or more endpoints of a system, API or server, that can be protected.

    Resource-level Access Control (RLAC) Authorization model that takes into consideration attributes of each specific request resource to grant/deny access to those resources (e.g. the resource's owner).

    Resource server Server that hosts protected resources.

    Role Aspect of a user\u2019s identity assigned to the user to indicate the level of access they should have to the system; essentially, roles represent collections of permissions.

    Role-based Access Control (RBAC) Authorization model that grants/denies access to resources based on the roles of authenticated users (rather than on complex attributes/policy rules).

    Scope Mechanism that defines the specific operations that applications can be allowed to do or information that they can request on an identity\u2019s behalf; often presented as a parameter when access is requested as a way to communicate what access is needed, and used by auth server to respond what actual access is granted.

    Single Page Application (SPA) Web application or website that interacts with the user by dynamically rewriting the current web page with new data from the web server.

    Single Sign-on (SSO) Authentication scheme that allows a user to log in with a single ID and password to any of several related, yet independent, software systems.

    Upstream (In the context of authentication/authorization) API whose endpoints must be protected by the auth system; the unprotected service in front of which a protection layer is added (by connecting with a Policy Decision Point).

    User-based Access Control (UBAC) Authorization model that grants/denies access to resources based on claims of the identity (attributes of the user).

    User-Managed Access (UMA) OAuth2-based access management protocol, used for users of an auth server to control the authorization process, i.e. directly granting/denying access to user-owned resources to other requesting parties.

    "},{"location":"authorino/docs/user-guides/","title":"User guides","text":"
    • Hello World The basics of protecting an API with Authorino.

    • Authentication with Kubernetes tokens (TokenReview API) Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.

    • Authentication with API keys Issue API keys stored in Kubernetes Secrets for clients to authenticate with your protected hosts.

    • Authentication with X.509 certificates and mTLS Verify client X.509 certificates against trusted root CAs.

    • OpenID Connect Discovery and authentication with JWTs Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).

    • OAuth 2.0 token introspection (RFC 7662) Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation in request-time.

    • Passing credentials (Authorization header, cookie headers and others) Customize where credentials are supplied in the request by each trusted source of identity.

    • HTTP \"Basic\" Authentication (RFC 7235) Turn Authorino API key Secrets settings into HTTP basic auth.

    • Anonymous access Bypass identity verification or fall back to anonymous access when credentials fail to validate

    • Token normalization Normalize identity claims from trusted sources and reduce complexity in your policies.

    • Edge Authentication Architecture (EAA) Exchange satellite (outer-layer) authentication tokens for \"Festival Wristbands\" accepted ubiquitously at the inside of your network. Normalize from multiple and varied sources of identity and authentication methods in the edge of your architecture; filter privacy data, limit the scope of permissions, and simplify authorization rules to your internal microservices.

    • Fetching auth metadata from external sources Get online data from remote HTTP services to enhance authorization rules.

    • OpenID Connect UserInfo Fetch user info for OpenID Connect ID tokens in request-time for extra metadata for your policies and online verification of token validity.

    • Resource-level authorization with User-Managed Access (UMA) resource registry Fetch resource attributes relevant for authorization from a User-Managed Access (UMA) resource registry such as Keycloak resource server clients.

    • Simple pattern-matching authorization policies Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.

    • OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.

    • Open Policy Agent (OPA) Rego policies Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.

    • Kubernetes RBAC for service authorization (SubjectAccessReview API) Manage permissions in the Kubernetes RBAC and let Authorino check them in request-time with the authorization system of the cluster.

    • Authorization with Keycloak Authorization Services Use Authorino as an adapter for Keycloak Authorization Services without importing any library or rebuilding your application code.

    • Integration with Authzed/SpiceDB Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.

    • Injecting data in the request Inject HTTP headers with serialized JSON content.

    • Authenticated rate limiting (with Envoy Dynamic Metadata) Provide Envoy with dynamic metadata from the external authorization process to be injected and used by consecutive filters, such as by a rate limiting service.

    • Redirecting to a login page Customize response status code and headers on failed requests. E.g. redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized; mask resources on access denied behind a 404 Not Found response instead of 403 Forbidden.

    • Mixing Envoy built-in filter for auth and Authorino Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in a AuthConfig.

    • Host override via context extension Induce the lookup of an AuthConfig by supplying extended host context, for use cases such as path prefix-based lookup and wildcard subdomains lookup.

    • Using Authorino as ValidatingWebhook service Use Authorino as a generic Kubernetes ValidatingWebhook service where the rules to validate a request to the Kubernetes API are written in an AuthConfig.

    • Reducing the operational space: sharding, noise and multi-tenancy Have multiple instances of Authorino running in the same space (Kubernetes namespace or cluster-scoped), yet watching particular sets of resources.

    • Caching Cache auth objects resolved at runtime for any configuration bit of an AuthConfig, for easy access in subsequent requests whenever an arbitrary cache key repeats, until the cache entry expires.

    • Observability Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.

    "},{"location":"authorino/docs/user-guides/anonymous-access/","title":"User guide: Anonymous access","text":"

    Bypass identity verification or fall back to anonymous access when credentials fail to validate

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 Anonymous access

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/anonymous-access/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/anonymous-access/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/anonymous-access/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/anonymous-access/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"public\":\n      anonymous: {}\nEOF\n

    The example above enables anonymous access (i.e. removes authentication), without adding any extra layer of protection to the API. This is virtually equivalent to setting a top-level condition to the AuthConfig that always skips the configuration, or to switching authentication/authorization off completely in the route to the API.

    For more sophisticated use cases of anonymous access with Authorino, consider combining this feature with other identity sources in the AuthConfig while playing with the priorities of each source, as well as combination with when conditions, and/or adding authorization policies that either cover authentication or address anonymous access with proper rules (e.g. enforcing read-only access).

    Check out the docs for the Anonymous access feature for an example of an AuthConfig that falls back to anonymous access when a priority OIDC/JWT-based authentication fails, and enforces a read-only policy in such cases.

    "},{"location":"authorino/docs/user-guides/anonymous-access/#consume-the-api","title":"\u277b Consume the API","text":"
    curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/anonymous-access/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/api-key-authentication/","title":"User guide: Authentication with API keys","text":"

    Issue API keys stored in Kubernetes Secrets for clients to authenticate with your protected hosts.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 API key

    In Authorino, API keys are stored as Kubernetes Secrets. Each resource must contain an api_key entry with the value of the API key, and labeled to match the selectors specified in spec.identity.apiKey.selector of the AuthConfig.

    API key Secrets must also include labels that match the secretLabelSelector field of the Authorino instance. See Resource reconciliation and status update for details.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/api-key-authentication/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/api-key-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\nEOF\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-api-key","title":"\u277b Create an API key","text":"
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

    With a valid API key:

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    With missing or invalid API key:

    curl -H 'Authorization: APIKEY invalid' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"friends\"\n# x-ext-auth-reason: the API Key provided is invalid\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#delete-an-api-key-revoke-access-to-the-api","title":"\u277d Delete an API key (revoke access to the API)","text":"
    kubectl delete secret/api-key-1\n
    "},{"location":"authorino/docs/user-guides/api-key-authentication/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/","title":"User guide: Authenticated rate limiting (with Envoy Dynamic Metadata)","text":"

    Provide Envoy with dynamic metadata about the external authorization process to be injected into the rate limiting filter.

    Authorino capabilities featured in this guide:
    • Dynamic response \u2192 Response wrappers \u2192 Envoy Dynamic Metadata
    • Dynamic response \u2192 JSON injection
    • Identity verification & authentication \u2192 API key

    Dynamic JSON objects built out of static values and values fetched from the Authorization JSON can be wrapped to be returned to the reverse-proxy as Envoy Well Known Dynamic Metadata content. Envoy can use those to inject data returned by the external authorization service into the other filters, such as the rate limiting filter.

    Check out as well the user guides about Injecting data in the request and Authentication with API keys.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

    At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-limitador","title":"\u2778 Deploy Limitador","text":"

    Limitador is a lightweight rate limiting service that can be used with Envoy.

    On this bundle, we will deploy Limitador pre-configured to limit requests to the talker-api domain up to 5 requests per interval of 60 seconds per user_id. Envoy will be configured to recognize the presence of Limitador and activate it on requests to the Talker API.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-the-talker-api","title":"\u2779 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#setup-envoy","title":"\u277a Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-an-authconfig","title":"\u277b Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    An annotation auth-data/username will be read from the Kubernetes API Key secret and passed as dynamic metadata { \"ext_auth_data\": { \"username\": \u00abannotations.auth-data/username\u00bb } }.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      dynamicMetadata:\n        \"rate-limit\":\n          json:\n            properties:\n              \"username\":\n                expression: auth.identity.metadata.annotations['auth-data/username']\n          key: ext_auth_data # how this bit of dynamic metadata from the ext authz service is named in the Envoy config\nEOF\n

    Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-the-api-keys","title":"\u277c Create the API keys","text":"

    For user John:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\n  annotations:\n    auth-data/username: john\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

    For user Jane:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-2\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\n  annotations:\n    auth-data/username: jane\nstringData:\n  api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#consume-the-api","title":"\u277d Consume the API","text":"

    As John:

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n

    Repeat the request a few more times within the 60-second time window, until the response status is 429 Too Many Requests.

    While the API is still limited to John, send requests as Jane:

    curl -H 'Authorization: APIKEY 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/authzed/","title":"User guide: Integration with Authzed/SpiceDB","text":"

    Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.

    Authorino capabilities featured in this guide:
    • Authorization \u2192 SpiceDB
    • Identity verification & authentication \u2192 API key

    "},{"location":"authorino/docs/user-guides/authzed/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

    At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/authzed/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/authzed/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/authzed/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/authzed/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/authzed/#create-the-permission-database","title":"\u277a Create the permission database","text":"

    Create the namespace:

    kubectl create namespace spicedb\n

    Create the SpiceDB instance:

    kubectl -n spicedb apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spicedb\n  labels:\n    app: spicedb\nspec:\n  selector:\n    matchLabels:\n      app: spicedb\n  template:\n    metadata:\n      labels:\n        app: spicedb\n    spec:\n      containers:\n\n      - name: spicedb\n        image: authzed/spicedb\n        args:\n        - serve\n        - \"--grpc-preshared-key\"\n        - secret\n        - \"--http-enabled\"\n        ports:\n        - containerPort: 50051\n        - containerPort: 8443\n  replicas: 1\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spicedb\nspec:\n  selector:\n    app: spicedb\n  ports:\n    - name: grpc\n      port: 50051\n      protocol: TCP\n    - name: http\n      port: 8443\n      protocol: TCP\nEOF\n

    Forward local request to the SpiceDB service inside the cluster:

    kubectl -n spicedb port-forward service/spicedb 8443:8443 2>&1 >/dev/null &\n

    Create the permission schema:

    curl -X POST http://localhost:8443/v1/schema/write \\\n  -H 'Authorization: Bearer secret' \\\n  -H 'Content-Type: application/json' \\\n  -d @- << EOF\n{\n  \"schema\": \"definition blog/user {}\\ndefinition blog/post {\\n\\trelation reader: blog/user\\n\\trelation writer: blog/user\\n\\n\\tpermission read = reader + writer\\n\\tpermission write = writer\\n}\"\n}\nEOF\n

    Create the relationships:

    • blog/user:emilia \u2192 writer of blog/post:1
    • blog/user:beatrice \u2192 reader of blog/post:1
    curl -X POST http://localhost:8443/v1/relationships/write \\\n  -H 'Authorization: Bearer secret' \\\n  -H 'Content-Type: application/json' \\\n  -d @- << EOF\n{\n  \"updates\": [\n    {\n      \"operation\": \"OPERATION_CREATE\",\n      \"relationship\": {\n        \"resource\": {\n          \"objectType\": \"blog/post\",\n          \"objectId\": \"1\"\n        },\n        \"relation\": \"writer\",\n        \"subject\": {\n          \"object\": {\n            \"objectType\": \"blog/user\",\n            \"objectId\": \"emilia\"\n          }\n        }\n      }\n    },\n    {\n      \"operation\": \"OPERATION_CREATE\",\n      \"relationship\": {\n        \"resource\": {\n          \"objectType\": \"blog/post\",\n          \"objectId\": \"1\"\n        },\n        \"relation\": \"reader\",\n        \"subject\": {\n          \"object\": {\n            \"objectType\": \"blog/user\",\n            \"objectId\": \"beatrice\"\n          }\n        }\n      }\n    }\n  ]\n}\nEOF\n
    "},{"location":"authorino/docs/user-guides/authzed/#create-an-authconfig","title":"\u277b Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.

    Store the shared token for Authorino to authenticate with the SpiceDB instance in a Kubernetes Secret:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: spicedb\n  labels:\n    app: spicedb\nstringData:\n  grpc-preshared-key: secret\nEOF\n

    Create the AuthConfig:

    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"blog-users\":\n      apiKey:\n        selector:\n          matchLabels:\n            app: talker-api\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  authorization:\n    \"authzed-spicedb\":\n      spicedb:\n        endpoint: spicedb.spicedb.svc.cluster.local:50051\n        insecure: true\n        sharedSecretRef:\n          name: spicedb\n          key: grpc-preshared-key\n        subject:\n          kind:\n            value: blog/user\n          name:\n            selector: auth.identity.metadata.annotations.username\n        resource:\n          kind:\n            value: blog/post\n          name:\n            selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2}\n        permission:\n          selector: context.request.http.method.@replace:{\"old\":\"GET\",\"new\":\"read\"}.@replace:{\"old\":\"POST\",\"new\":\"write\"}\nEOF\n
    "},{"location":"authorino/docs/user-guides/authzed/#create-the-api-keys","title":"\u277c Create the API keys","text":"

    For Emilia (writer):

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-writer\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: talker-api\n  annotations:\n    username: emilia\nstringData:\n  api_key: IAMEMILIA\nEOF\n

    For Beatrice (reader):

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-reader\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: talker-api\n  annotations:\n    username: beatrice\nstringData:\n  api_key: IAMBEATRICE\nEOF\n
    "},{"location":"authorino/docs/user-guides/authzed/#consume-the-api","title":"\u277d Consume the API","text":"

    As Emilia, send a GET request:

    curl -H 'Authorization: APIKEY IAMEMILIA' \\\n     -X GET \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n

    As Emilia, send a POST request:

    curl -H 'Authorization: APIKEY IAMEMILIA' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n

    As Beatrice, send a GET request:

    curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n     -X GET \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n

    As Beatrice, send a POST request:

    curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: PERMISSIONSHIP_NO_PERMISSION;token=GhUKEzE2NzU3MDE3MjAwMDAwMDAwMDA=\n
    "},{"location":"authorino/docs/user-guides/authzed/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-writer\nkubectl delete secret/api-key-reader\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace spicedb\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/caching/","title":"User guide: Caching","text":"

    Cache auth objects resolved at runtime for any configuration bit of an AuthConfig (i.e. any evaluator), of any phase (identity, metadata, authorization and dynamic response), for easy access in subsequent requests, whenever an arbitrary (user-defined) cache key repeats, until the cache entry expires.

    This is particularly useful for configuration bits whose evaluation is significantly more expensive than accessing the cache. E.g.:

    • Caching of metadata fetched from external sources in general
    • Caching of previously validated identity access tokens (e.g. for OAuth2 opaque tokens that involve consuming the token introspection endpoint of an external auth server)
    • Caching of complex Rego policies that involve sending requests to external services

    Cases where one will NOT want to enable caching, because the evaluation is relatively cheap compared to accessing and managing the cache:

    • Validation of OIDC/JWT access tokens
    • OPA/Rego policies that do not involve external requests
    • JSON pattern-matching authorization
    • Dynamic JSON responses
    • Anonymous access
    Authorino capabilities featured in this guide:
    • Common feature \u2192 Caching
    • Identity verification & authentication \u2192 Anonymous access
    • External auth metadata \u2192 HTTP GET/GET-by-POST
    • Authorization \u2192 Open Policy Agent (OPA) Rego policies
    • Dynamic response \u2192 JSON injection

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/caching/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/caching/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/caching/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/caching/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/caching/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/caching/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    The example below enables caching for the external source of metadata, which in this case, for convenience, is the same upstream API protected by Authorino (i.e. the Talker API), though consumed directly by Authorino, without passing through the proxy. This API generates a uuid random hash that it injects in the JSON response. This value is different in every request processed by the API.

    The example also enables caching of returned OPA virtual documents. cached-authz is a trivial Rego policy that always grants access, but generates a timestamp, which Authorino will cache.

    In both cases, the path of the HTTP request is used as cache key. I.e., whenever the path repeats, Authorino reuses the values stored previously in each cache table (cached-metadata and cached-authz), respectively saving a request to the external source of metadata and the evaluation of the OPA policy. Cache entries will expire in both cases 60 seconds after they were stored in the cache.

    The cached values will be visible in the response returned by the Talker API in x-authz-data header injected by Authorino. This way, we can tell when an existing value in the cache was used and when a new one was generated and stored.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"anonymous\":\n      anonymous: {}\n  metadata:\n    \"cached-metadata\":\n      http:\n        url: \"http://talker-api.default.svc.cluster.local:3000/metadata/{context.request.http.path}\"\n      cache:\n        key:\n          selector: context.request.http.path\n        ttl: 60\n  authorization:\n    \"cached-authz\":\n      opa:\n        rego: |\n          now = time.now_ns()\n          allow = true\n        allValues: true\n      cache:\n        key:\n          selector: context.request.http.path\n        ttl: 60\n  response:\n    success:\n      headers:\n        \"x-authz-data\":\n          json:\n            properties:\n              \"cached-metadata\":\n                selector: auth.metadata.cached-metadata.uuid\n              \"cached-authz\":\n                selector: auth.authorization.cached-authz.now\nEOF\n
    "},{"location":"authorino/docs/user-guides/caching/#consume-the-api","title":"\u277b Consume the API","text":"
    1. To /hello
    curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\",\n# [\u2026]\n
    1. To a different path
    curl http://talker-api.127.0.0.1.nip.io:8000/goodbye\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343097860450300\\\",\\\"cached-metadata\\\":\\\"37fce386-1ee8-40a7-aed1-bf8a208f283c\\\"}\",\n# [\u2026]\n
    1. To /hello again before the cache entry expires (60 seconds from the first request sent to this path)
    curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\",  <=== same cache-id as before\n# [\u2026]\n
    1. To /hello again after the cache entry expires (60 seconds from the first request sent to this path)
    curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343135702743800\\\",\\\"cached-metadata\\\":\\\"e708a3a6-5caf-4028-ab5c-573ad9be7188\\\"}\",  <=== different cache-id\n# [\u2026]\n
    "},{"location":"authorino/docs/user-guides/caching/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/","title":"User guide: Redirecting to a login page","text":"

    Customize response status code and headers on failed requests to redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized.

    Authorino capabilities featured in this guide:
    • Dynamic response \u2192 Custom denial status
    • Identity verification & authentication \u2192 API key
    • Identity verification & authentication \u2192 JWT verification

    Authorino's default response status codes, messages and headers for unauthenticated (401) and unauthorized (403) requests can be customized with static values and values fetched from the Authorization JSON.

    Check out as well the user guides about HTTP \"Basic\" Authentication (RFC 7235) and OpenID Connect Discovery and authentication with JWTs.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample web application called Matrix Quotes to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-the-matrix-quotes-web-application","title":"\u2778 Deploy the Matrix Quotes web application","text":"

    The Matrix Quotes is a static web application that contains quotes from the film The Matrix.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Matrix Quotes webapp behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/envoy-deploy.yaml\n

    The command above creates an Ingress with host name matrix-quotes.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: matrix-quotes-protection\nspec:\n  hosts:\n\n  - matrix-quotes.127.0.0.1.nip.io\n  authentication:\n    \"browser-users\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: users\n      credentials:\n        cookie:\n          name: TOKEN\n    \"http-basic-auth\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: users\n      credentials:\n        authorizationHeader:\n          prefix: Basic\n  response:\n    unauthenticated:\n      code: 302\n      headers:\n        \"Location\":\n          expression: |\n            'http://matrix-quotes.127.0.0.1.nip.io:8000/login.html?redirect_to=' + request.path\nEOF\n

    Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-api-key","title":"\u277b Create an API key","text":"
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: user-credential-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: users\nstringData:\n  api_key: am9objpw # john:p\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application","title":"\u277c Consume the application","text":"

    On a web browser, navigate to http://matrix-quotes.127.0.0.1.nip.io:8000.

    Click on the cards to read quotes from characters of the movie. You should be redirected to the login page.

    Log in using John's credentials:

    • Username: john
    • Password: p

    Click again on the cards and check that now you are able to access the inner pages.

    You can also consume a protected endpoint of the application using HTTP Basic Authentication:

    curl -u john:p http://matrix-quotes.127.0.0.1.nip.io:8000/neo.html\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#optional-modify-the-authconfig-to-authenticate-with-oidc","title":"\u277d (Optional) Modify the AuthConfig to authenticate with OIDC","text":""},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-a-keycloak-server","title":"Setup a Keycloak server","text":"

    Deploy a Keycloak server preloaded with a realm named kuadrant:

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    Resolve local Keycloak domain so it can be accessed from the local host and inside the cluster with the name: (This will be needed to redirect to Keycloak's login page and at the same time validate issued tokens.)

    echo '127.0.0.1 keycloak' >> /etc/hosts\n

    Forward local requests to the instance of Keycloak running in the cluster:

    kubectl port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n

    Create a client:

    curl -H \"Authorization: Bearer $(curl http://keycloak:8080/realms/master/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=admin-cli' -d 'username=admin' -d 'password=p' | jq -r .access_token)\" \\\n     -H 'Content-type: application/json' \\\n     -d '{ \"name\": \"matrix-quotes\", \"clientId\": \"matrix-quotes\", \"publicClient\": true, \"redirectUris\": [\"http://matrix-quotes.127.0.0.1.nip.io:8000/auth*\"], \"enabled\": true }' \\\n     http://keycloak:8080/admin/realms/kuadrant/clients\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#reconfigure-the-matrix-quotes-app-to-use-keycloaks-login-page","title":"Reconfigure the Matrix Quotes app to use Keycloak's login page","text":"
    kubectl set env deployment/matrix-quotes KEYCLOAK_REALM=http://keycloak:8080/realms/kuadrant CLIENT_ID=matrix-quotes\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#apply-the-changes-to-the-authconfig","title":"Apply the changes to the AuthConfig","text":"
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: matrix-quotes-protection\nspec:\n  hosts:\n\n  - matrix-quotes.127.0.0.1.nip.io\n  authentication:\n    \"idp-users\":\n      jwt:\n        issuerUrl: http://keycloak:8080/realms/kuadrant\n      credentials:\n        cookie:\n          name: TOKEN\n  response:\n    unauthenticated:\n      code: 302\n      headers:\n        \"Location\":\n          expression: |\n            'http://keycloak:8080/realms/kuadrant/protocol/openid-connect/auth?client_id=matrix-quotes&redirect_uri=http://matrix-quotes.127.0.0.1.nip.io:8000/auth?redirect_to=' + request.path + '&scope=openid&response_type=code'\nEOF\n
    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application-again","title":"Consume the application again","text":"

    Refresh the browser window or navigate again to http://matrix-quotes.127.0.0.1.nip.io:8000.

    Click on the cards to read quotes from characters of the movie. You should be redirected to a login page, this time served by the Keycloak server.

    Log in as Jane (a user of the Keycloak realm):

    • Username: jane
    • Password: p

    Click again on the cards and check that now you are able to access the inner pages.

    "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/user-credential-1\nkubectl delete authconfig/matrix-quotes-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/","title":"User guide: Edge Authentication Architecture (EAA)","text":"

    Edge Authentication Architecture (EAA) is a pattern where, beyond extracting authentication logic and specifics from the application codebase into a proper authN/authZ layer, this responsibility is pushed to the edge of your cloud network, without nevertheless violating the Zero Trust principle.

    The very definition of \"edge\" is subject to discussion, but the underlying idea is that clients (e.g. API clients, IoT devices, etc.) authenticate with a layer that, before moving traffic to inside the network:

    • understands the complexity of all the different methods of authentication supported;
    • sometimes some token normalization is involved;
    • eventually enforces some preliminary authorization policies; and
    • possibly filters data bits that are sensitive to privacy concerns (e.g. to comply with local legislation such as GDPR, CCPA, etc)

    As a minimum, EAA makes it possible to simplify authentication between applications and microservices inside the network, as well as to reduce authorization to domain-specific rules and policies, rather than having to deal with all the complexity of supporting all types of clients in every node.

    Authorino capabilities featured in this guide:
    • Dynamic response \u2192 Festival Wristband tokens
    • Identity verification & authentication \u2192 Identity extension
    • Identity verification & authentication \u2192 API key
    • Identity verification & authentication \u2192 JWT verification

    Festival Wristbands are OpenID Connect ID tokens (signed JWTs) issued by Authorino by the end of the Auth Pipeline, for authorized requests. It can be configured to include claims based on static values and values fetched from the Authorization JSON.

    Check out as well the user guides about Token normalization, Authentication with API keys and OpenID Connect Discovery and authentication with JWTs.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses
    • jwt, to inspect JWTs (optional)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino and configuring 2 environments of an architecture, edge and internal.

    The first environment is a facade for handling the first layer of authentication and exchanging any valid presented authentication token for a Festival Wristband token. In the second, we will deploy a sample service called Talker API that the authorization service will ensure to receive only authenticated traffic presented with a valid Festival Wristband.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u2779.

    At steps \u2779 and \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-namespaces","title":"\u2777 Create the namespaces","text":"

    For simplicity, this example will set up the edge and internal nodes in different namespaces of the same Kubernetes cluster. They will share a single cluster-wide Authorino instance. In real-life scenarios, it does not have to be like that.

    kubectl create namespace authorino\nkubectl create namespace edge\nkubectl create namespace internal\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-authorino","title":"\u2778 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources cluster-wide2, with TLS disabled3.

    kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  clusterWide: true\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-edge","title":"\u2779 Setup the Edge","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy","title":"Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up external authorization with the Authorino instance.4

    kubectl -n edge apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-edge-deploy.yaml\n

    The command above creates an Ingress with host name edge.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 9000 to the Envoy service running inside the cluster:

    kubectl -n edge port-forward deployment/envoy 9000:9000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig","title":"Create the AuthConfig","text":"

    Create a required secret that will be used by Authorino to sign the Festival Wristband tokens:

    kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: wristband-signing-key\nstringData:\n  key.pem: |\n    -----BEGIN EC PRIVATE KEY-----\n    MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n    AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n    cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n    -----END EC PRIVATE KEY-----\ntype: Opaque\nEOF\n

    Create the config:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl -n edge apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: edge-auth\nspec:\n  hosts:\n\n  - edge.127.0.0.1.nip.io\n  authentication:\n    \"api-clients\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino.kuadrant.io/managed-by: authorino\n        allNamespaces: true\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n      overrides:\n        \"username\":\n          selector: auth.identity.metadata.annotations.authorino\\.kuadrant\\.io/username\n    \"idp-users\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n      defaults:\n        \"username\":\n          selector: auth.identity.preferred_username\n  response:\n    success:\n      dynamicMetadata:\n        \"wristband\":\n          wristband:\n            issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\n            customClaims:\n              \"username\":\n                selector: auth.identity.username\n            tokenDuration: 300\n            signingKeyRefs:\n            - name: wristband-signing-key\n              algorithm: ES256\nEOF\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-internal-workload","title":"\u277a Setup the internal workload","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-the-talker-api","title":"Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy_1","title":"Setup Envoy","text":"

    This other bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.

    kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-node-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl -n internal port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig_1","title":"Create the AuthConfig","text":"Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl -n internal apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"edge-authenticated\":\n      jwt:\n        issuerUrl: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\nEOF\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-an-api-key","title":"\u277b Create an API key","text":"
    kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n  annotations:\n    authorino.kuadrant.io/username: alice\n    authorino.kuadrant.io/email: alice@host\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#using-the-api-key-to-authenticate","title":"Using the API key to authenticate","text":"

    Authenticate at the edge:

    WRISTBAND_TOKEN=$(curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n

    Consume the API:

    curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n

    Try to consume the API with authentication token that is only accepted in the edge:

    curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"edge-authenticated\"\n# x-ext-auth-reason: credential not found\n

    (Optional) Inspect the wristband token and verify that it only contains restricted info to authenticate and authorize with internal apps.

    jwt decode $WRISTBAND_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# {\n#   \"exp\": 1638452051,\n#   \"iat\": 1638451751,\n#   \"iss\": \"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\",\n#   \"sub\": \"02cb51ea0e1c9f3c0960197a2518c8eb4f47e1b9222a968ffc8d4c8e783e4d19\",\n#   \"username\": \"alice\"\n# }\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#authenticating-with-the-keycloak-server","title":"Authenticating with the Keycloak server","text":"

    Obtain an access token with the Keycloak server for Jane:

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

    ACCESS_TOKEN=$(kubectl -n edge run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    (Optional) Inspect the access token issued by Keycloak and verify how it contains more details about the identity than required to authenticate and authorize with internal apps.

    jwt decode $ACCESS_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# { [...]\n#   \"email\": \"jane@kuadrant.io\",\n#   \"email_verified\": true,\n#   \"exp\": 1638452220,\n#   \"family_name\": \"Smith\",\n#   \"given_name\": \"Jane\",\n#   \"iat\": 1638451920,\n#   \"iss\": \"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\",\n#   \"jti\": \"699f6e49-dea4-4f29-ae2a-929a3a18c94b\",\n#   \"name\": \"Jane Smith\",\n#   \"preferred_username\": \"jane\",\n#   \"realm_access\": {\n#     \"roles\": [\n#       \"offline_access\",\n#       \"member\",\n#       \"admin\",\n#       \"uma_authorization\"\n#     ]\n#   },\n# [...]\n

    As Jane, obtain a limited wristband token at the edge:

    WRISTBAND_TOKEN=$(curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n

    Consume the API:

    curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete namespace edge\nkubectl delete namespace internal\nkubectl delete namespace authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino and Authorino Operator manifests, run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/","title":"User guide: Mixing Envoy built-in filter for auth and Authorino","text":"

    Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in an AuthConfig.

    In this user guide, we will set up Envoy and Authorino to protect a service called the Talker API service, with JWT authentication handled in Envoy and a more complex authorization policy enforced in Authorino.

    The policy defines a geo-fence by which only requests originated in Great Britain (country code: GB) will be accepted, unless the user is bound to a role called 'admin' in the auth server, in which case no geofence is enforced.

    All requests to the Talker API will be authenticated in Envoy. However, requests to /global will not trigger the external authorization.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 Plain
    • External auth metadata \u2192 HTTP GET/GET-by-POST
    • Authorization \u2192 Pattern-matching authorization
    • Dynamic response \u2192 Custom denial status

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

    At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  labels:\n    app: authorino\n  name: envoy\ndata:\n  envoy.yaml: |\n    static_resources:\n      clusters:\n\n      - name: talker-api\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        load_assignment:\n          cluster_name: talker-api\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: talker-api\n                    port_value: 3000\n      - name: keycloak\n        connect_timeout: 0.25s\n        type: logical_dns\n        lb_policy: round_robin\n        load_assignment:\n          cluster_name: keycloak\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: keycloak.keycloak.svc.cluster.local\n                    port_value: 8080\n      - name: authorino\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        http2_protocol_options: {}\n        load_assignment:\n          cluster_name: authorino\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: authorino-authorino-authorization\n                    port_value: 50051\n      listeners:\n      - address:\n          socket_address:\n            address: 0.0.0.0\n            port_value: 8000\n        filter_chains:\n        - filters:\n          - name: envoy.http_connection_manager\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n              stat_prefix: local\n              route_config:\n                name: local_route\n                virtual_hosts:\n                - name: local_service\n                  
domains: ['*']\n                  routes:\n                  - match: { path_separated_prefix: /global }\n                    route: { cluster: talker-api }\n                    typed_per_filter_config:\n                      envoy.filters.http.ext_authz:\n                        \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n                        disabled: true\n                  - match: { prefix: / }\n                    route: { cluster: talker-api }\n              http_filters:\n              - name: envoy.filters.http.jwt_authn\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\n                  providers:\n                    keycloak:\n                      issuer: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n                      remote_jwks:\n                        http_uri:\n                          uri: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/certs\n                          cluster: keycloak\n                          timeout: 5s\n                        cache_duration:\n                          seconds: 300\n                      payload_in_metadata: verified_jwt\n                  rules:\n                  - match: { prefix: / }\n                    requires: { provider_name: keycloak }\n              - name: envoy.filters.http.ext_authz\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n                  transport_api_version: V3\n                  failure_mode_allow: false\n                  metadata_context_namespaces:\n                  - envoy.filters.http.jwt_authn\n                  grpc_service:\n                    envoy_grpc:\n                      cluster_name: authorino\n                    timeout: 1s\n              - name: 
envoy.filters.http.router\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n              use_remote_address: true\n    admin:\n      access_log_path: \"/tmp/admin_access.log\"\n      address:\n        socket_address:\n          address: 0.0.0.0\n          port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: authorino\n    svc: envoy\n  name: envoy\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: authorino\n      svc: envoy\n  template:\n    metadata:\n      labels:\n        app: authorino\n        svc: envoy\n    spec:\n      containers:\n      - args:\n        - --config-path /usr/local/etc/envoy/envoy.yaml\n        - --service-cluster front-proxy\n        - --log-level info\n        - --component-log-level filter:trace,http:debug,router:debug\n        command:\n        - /usr/local/bin/envoy\n        image: envoyproxy/envoy:v1.22-latest\n        name: envoy\n        ports:\n        - containerPort: 8000\n          name: web\n        - containerPort: 8001\n          name: admin\n        volumeMounts:\n        - mountPath: /usr/local/etc/envoy\n          name: config\n          readOnly: true\n      volumes:\n      - configMap:\n          items:\n          - key: envoy.yaml\n            path: envoy.yaml\n          name: envoy\n        name: config\n---\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: authorino\n  name: envoy\nspec:\n  ports:\n  - name: web\n    port: 8000\n    protocol: TCP\n  selector:\n    app: authorino\n    svc: envoy\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-wildcard-host\nspec:\n  rules:\n  - host: talker-api.127.0.0.1.nip.io\n    http:\n      paths:\n      - backend:\n          service:\n            name: envoy\n            port:\n              number: 8000\n        path: /\n        pathType: Prefix\nEOF\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-ip-location-service","title":"\u277a Deploy the IP Location service","text":"

    The IP Location service is a simple service that resolves an IPv4 address into geo location info.

    kubectl apply -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/ip-location/ip-location-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#create-an-authconfig","title":"\u277b Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"jwt\":\n      plain:\n        selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\n  metadata:\n    \"geoinfo\":\n      http:\n        url: 'http://ip-location.default.svc.cluster.local:3000/{context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"}}'\n        headers:\n          \"Accept\":\n            value: application/json\n      cache:\n        key:\n          selector: \"context.request.http.headers.x-forwarded-for.@extract:{\\\"sep\\\":\\\",\\\"}\"\n  authorization:\n    \"geofence\":\n      when:\n      - selector: auth.identity.realm_access.roles\n        operator: excl\n        value: admin\n      patternMatching:\n        patterns:\n        - selector: auth.metadata.geoinfo.country_iso_code\n          operator: eq\n          value: \"GB\"\n  response:\n    unauthorized:\n      message:\n        selector: \"The requested resource is not available in {auth.metadata.geoinfo.country_name}\"\nEOF\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-a-token-and-consume-the-api","title":"\u277c Obtain a token and consume the API","text":""},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"

    Obtain an access token with the Keycloak server for John:

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for the user John, a non-admin (member) user:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    As John, consume the API inside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n

    As John, consume the API outside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: The requested resource is not available in Italy\n

    As John, consume a path of the API that will cause Envoy to skip external authorization:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"

    Obtain an access token with the Keycloak server for Jane, an admin user:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    As Jane, consume the API inside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n

    As Jane, consume the API outside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n

    As Jane, consume a path of the API that will cause Envoy to skip external authorization:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete ingress/ingress-wildcard-host\nkubectl delete service/envoy\nkubectl delete deployment/envoy\nkubectl delete configmap/envoy\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/external-metadata/","title":"User guide: Fetching auth metadata from external sources","text":"

    Get online data from remote HTTP services to enhance authorization rules.

    Authorino capabilities featured in this guide:
    • External auth metadata \u2192 HTTP GET/GET-by-POST
    • Identity verification & authentication \u2192 API key
    • Authorization \u2192 Open Policy Agent (OPA) Rego policies

    You can configure Authorino to fetch additional metadata from external sources at request time, by sending either a GET or a POST request to an HTTP service. The service is expected to return JSON content which is appended to the Authorization JSON, thus becoming available for usage in other configs of the Auth Pipeline, such as in authorization policies or custom responses.

    The URL, parameters and headers of the request to the external source of metadata can be configured, including with dynamic values. Authentication between Authorino and the service can be set as part of these configuration options, or based on a shared authentication token stored in a Kubernetes Secret.

    Check out as well the user guides about Authentication with API keys and Open Policy Agent (OPA) Rego policies.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/external-metadata/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/external-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/external-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/external-metadata/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/external-metadata/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/external-metadata/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    In this example, we will implement a geofence policy for the API, using OPA and metadata fetching from an external service that returns geolocalization JSON data for a given IP address. The policy establishes that only GET requests are allowed and the path of the request should be in the form /{country-code}/*, where {country-code} is the 2-character code of the country where the client is identified as being physically present.

    The implementation relies on the X-Forwarded-For HTTP header to read the client's IP address.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  metadata:\n    \"geo\":\n      http:\n        urlExpression: |\n          'http://ip-api.com/json/' + request.headers['x-forwarded-for'].split(',')[0] + '?fields=countryCode'\n        headers:\n          \"Accept\":\n            expression: '\"application/json\"'\n  authorization:\n    \"geofence\":\n      opa:\n        rego: |\n          import input.context.request.http\n\n          allow {\n            http.method = \"GET\"\n            split(http.path, \"/\") = [_, requested_country, _]\n            lower(requested_country) == lower(object.get(input.auth.metadata.geo, \"countryCode\", \"\"))\n          }\nEOF\n

    Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

    "},{"location":"authorino/docs/user-guides/external-metadata/#create-an-api-key","title":"\u277b Create an API key","text":"
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/external-metadata/#consume-the-api","title":"\u277c Consume the API","text":"

    From an IP address assigned to the United Kingdom of Great Britain and Northern Ireland (country code GB):

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 200 OK\n
    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 403 Forbidden\n

    From an IP address assigned to Italy (country code IT):

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 109.112.34.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 403 Forbidden\n
    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 109.112.34.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/external-metadata/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/hello-world/","title":"User guide: Hello World","text":""},{"location":"authorino/docs/user-guides/hello-world/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant, you can skip step \u2778. You may already have Authorino installed and running as well. In this case, also skip step \u277a. If you also have your workload cluster configured, with sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, go straight to step \u277c.

    At step \u277c, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/hello-world/#create-the-namespace","title":"\u2776 Create the namespace","text":"
    kubectl create namespace hello-world\n# namespace/hello-world created\n
    "},{"location":"authorino/docs/user-guides/hello-world/#deploy-the-talker-api","title":"\u2777 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n# deployment.apps/talker-api created\n# service/talker-api created\n
    "},{"location":"authorino/docs/user-guides/hello-world/#setup-envoy","title":"\u2778 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.1

    kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/envoy-deploy.yaml\n# configmap/envoy created\n# deployment.apps/envoy created\n# service/envoy created\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl -n hello-world port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-unprotected","title":"\u2779 Consume the API (unprotected)","text":"
    curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/hello-world/#protect-the-api","title":"\u277a Protect the API","text":""},{"location":"authorino/docs/user-guides/hello-world/#install-the-authorino-operator","title":"Install the Authorino Operator","text":"
    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/hello-world/#deploy-authorino","title":"Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service2 that watches for AuthConfig resources in the hello-world namespace3, with TLS disabled4.

    kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authorino.yaml\n# authorino.operator.authorino.kuadrant.io/authorino created\n
    "},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-behind-envoy-and-authorino","title":"\u277b Consume the API behind Envoy and Authorino","text":"
    curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 404 Not Found\n# x-ext-auth-reason: Service not found\n

    Authorino does not know about the talker-api.127.0.0.1.nip.io host, hence the 404 Not Found. Let's teach Authorino about this host by applying an AuthConfig.

    "},{"location":"authorino/docs/user-guides/hello-world/#apply-the-authconfig","title":"\u277c Apply the AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authconfig.yaml\n# authconfig.authorino.kuadrant.io/talker-api-protection created\n
    "},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-without-credentials","title":"\u277d Consume the API without credentials","text":"
    curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-clients\"\n# x-ext-auth-reason: credential not found\n
    "},{"location":"authorino/docs/user-guides/hello-world/#grant-access-to-the-api-with-a-tailor-made-security-scheme","title":"Grant access to the API with a tailor-made security scheme","text":"

    Check out other user guides for several use-cases of authentication and authorization, and the instructions to implement them using Authorino.

    A few examples of available user guides:

    • Authentication with API keys
    • Authentication with JWTs and OpenID Connect Discovery
    • Authentication with Kubernetes tokens (TokenReview API)
    • Authorization with Open Policy Agent (OPA) Rego policies
    • Authorization with simple JSON pattern-matching rules (e.g. JWT claims)
    • Authorization with Kubernetes RBAC (SubjectAccessReview API)
    • Fetching auth metadata from external sources
    • Token normalization
    "},{"location":"authorino/docs/user-guides/hello-world/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the namespaces created in steps 1 and 5:

    kubectl delete namespace hello-world\nkubectl delete namespace authorino-operator\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    2. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    3. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    4. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/host-override/","title":"Host override via context extension","text":"

    By default, Authorino uses the host information of the HTTP request (Attributes.Http.Host) to look up an indexed AuthConfig to be enforced1. The host info can be overridden by supplying a host entry as a (per-route) context extension (Attributes.ContextExtensions), which takes precedence whenever present.

    Overriding the host attribute of the HTTP request can be useful to support use cases such as path prefix-based lookup and wildcard subdomain lookup.

    \u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant.

    In this guide:

    • Example of host override for path prefix-based lookup
    • Example of host override for wildcard subdomain lookup
    "},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-path-prefix-based-lookup","title":"Example of host override for path prefix-based lookup","text":"

    In this use case, 2 different APIs (i.e. Dogs API and Cats API) are served under the same base domain, and differentiated by the path prefix:

    • pets.com/dogs \u2192 Dogs API
    • pets.com/cats \u2192 Cats API

    Edit the Envoy config to extend the external authorization settings at the level of the routes, with the host value that will be favored by Authorino before the actual host attribute of the HTTP request:

    virtual_hosts:\n\n- name: pets-api\n  domains: ['pets.com']\n  routes:\n  - match:\n      prefix: /dogs\n    route:\n      cluster: dogs-api\n    typed_per_filter_config:\n      envoy.filters.http.ext_authz:\n        \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n        check_settings:\n          context_extensions:\n            host: dogs.pets.com\n  - match:\n      prefix: /cats\n    route:\n      cluster: cats-api\n    typed_per_filter_config:\n      envoy.filters.http.ext_authz:\n        \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n        check_settings:\n          context_extensions:\n            host: cats.pets.com\n

    Create the AuthConfig for the Pets API:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: dogs-api-protection\nspec:\n  hosts:\n\n  - dogs.pets.com\n\n  authentication: [...]\n

    Create the AuthConfig for the Cats API:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: cats-api-protection\nspec:\n  hosts:\n\n  - cats.pets.com\n\n  authentication: [...]\n

    Notice that the host subdomains dogs.pets.com and cats.pets.com are not really requested by the API consumers. Rather, users send requests to pets.com/dogs and pets.com/cats. When routing those requests, Envoy makes sure to inject the corresponding context extensions that will induce the right lookup in Authorino.

    "},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-wildcard-subdomain-lookup","title":"Example of host override for wildcard subdomain lookup","text":"

    In this use case, a single Pets API serves requests for any subdomain that matches *.pets.com, e.g.:

    • dogs.pets.com \u2192 Pets API
    • cats.pets.com \u2192 Pets API

    Edit the Envoy config to extend the external authorization settings at the level of the virtual host, with the host value that will be favored by Authorino before the actual host attribute of the HTTP request:

    virtual_hosts:\n\n- name: pets-api\n  domains: ['*.pets.com']\n  typed_per_filter_config:\n    envoy.filters.http.ext_authz:\n      \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n      check_settings:\n        context_extensions:\n          host: pets.com\n  routes:\n  - match:\n      prefix: /\n    route:\n      cluster: pets-api\n

    The host context extension used above is any key that matches one of the hosts listed in the targeted AuthConfig.

    Create the AuthConfig for the Pets API:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: pets-api-protection\nspec:\n  hosts:\n\n  - pets.com\n\n  authentication: [...]\n

    Notice that requests to dogs.pets.com and to cats.pets.com are all routed by Envoy to the same API, with the same external authorization configuration. In all cases, Authorino will look up the indexed AuthConfig associated with pets.com. The same is valid for a request sent, e.g., to birds.pets.com.

    1. For further details about Authorino lookup of AuthConfig, check out Host lookup.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/http-basic-authentication/","title":"User guide: HTTP \"Basic\" Authentication (RFC 7235)","text":"

    Turn Authorino API key Secrets settings into HTTP basic auth.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 API key
    • Authorization \u2192 Pattern-matching authorization

    HTTP \"Basic\" Authentication (RFC 7235) is not recommended if you can afford other more secure methods such as OpenID Connect. Nonetheless, it is sometimes necessary to implement it to support legacy systems.

    In Authorino, HTTP \"Basic\" Authentication can be modeled leveraging the API key authentication feature (stored as Kubernetes Secrets with an api_key entry and labeled to match selectors specified in spec.identity.apiKey.selector of the AuthConfig).

    Check out as well the user guide about Authentication with API keys.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    The config uses API Key secrets to store base64-encoded username:password HTTP \"Basic\" authentication credentials. The config also specifies an Access Control List (ACL) by which only user john is authorized to consume the /bye endpoint of the API.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"http-basic-auth\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: users\n      credentials:\n        authorizationHeader:\n          prefix: Basic\n  authorization:\n    \"acl\":\n      when:\n      - predicate: request.path == '/bye'\n      patternMatching:\n        patterns:\n        - selector: context.request.http.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\"}\n          operator: eq\n          value: john\nEOF\n

    Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON. Check out as well the common feature Conditions about skipping parts of an AuthConfig in the auth pipeline based on context.

    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-user-credentials","title":"\u277b Create user credentials","text":"

    To create credentials for HTTP \"Basic\" Authentication, store each username:password, base64-encoded, in the api_key value of the Kubernetes Secret resources. E.g.:

    printf \"john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" | base64\n# am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==\n

    Create credentials for user John:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: basic-auth-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: users\nstringData:\n  api_key: am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA== # john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

    Create credentials for user Jane:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: basic-auth-2\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: users\nstringData:\n  api_key: amFuZTpkTnNScnNhcHkwbk5Dd210NTM3ZkhGcHl4MGNCc0xFcA== # jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

    As John (authorized in the ACL):

    curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
    curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/bye\n# HTTP/1.1 200 OK\n

    As Jane (NOT authorized in the ACL):

    curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
    curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/bye -i\n# HTTP/1.1 403 Forbidden\n

    With an invalid user/password:

    curl -u unknown:invalid http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"http-basic-auth\"\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#revoke-access-to-the-api","title":"\u277d Revoke access to the API","text":"
    kubectl delete secret/basic-auth-1\n
    "},{"location":"authorino/docs/user-guides/http-basic-authentication/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/basic-auth-1\nkubectl delete secret/basic-auth-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/injecting-data/","title":"User guide: Injecting data in the request","text":"

    Inject HTTP headers with serialized JSON content.

    Authorino capabilities featured in this guide:
    • Dynamic response \u2192 JSON injection
    • Identity verification & authentication \u2192 API key

    Inject serialized custom JSON objects as HTTP request headers. Values can be static or fetched from the Authorization JSON.

    Check out as well the user guide about Authentication with API keys.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/injecting-data/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/injecting-data/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/injecting-data/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/injecting-data/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/injecting-data/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/injecting-data/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    The following defines a JSON object to be injected as an added HTTP header into the request, named after the response config x-ext-auth-data. The object includes 3 properties:

    1. a static value authorized: true;
    2. a dynamic value request-time, from Envoy-supplied contextual data present in the Authorization JSON; and
    3. a greeting message greeting-message that interpolates a dynamic value read from an annotation of the Kubernetes Secret resource that represents the API key used to authenticate into a static string.
    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      headers:\n        \"x-ext-auth-data\":\n          json:\n            properties:\n              \"authorized\":\n                expression: \"true\"\n              \"request-time\":\n                expression: request.time.seconds\n              \"greeting-message\":\n                expression: |\n                  'Hello, ' + auth.identity.metadata.annotations['auth-data/name']\nEOF\n

    Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

    "},{"location":"authorino/docs/user-guides/injecting-data/#create-an-api-key","title":"\u277b Create an API key","text":"
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\n  annotations:\n    auth-data/name: Rita\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/injecting-data/#consume-the-api","title":"\u277c Consume the API","text":"
    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# {\n#   \"method\": \"GET\",\n#   \"path\": \"/hello\",\n#   \"query_string\": null,\n#   \"body\": \"\",\n#   \"headers\": {\n#     \u2026\n#     \"X-Ext-Auth-Data\": \"{\\\"authorized\\\":true,\\\"greeting-message\\\":\\\"Hello, Rita!\\\",\\\"request-time\\\":1637954644}\",\n#   },\n#   \u2026\n# }\n
    "},{"location":"authorino/docs/user-guides/injecting-data/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/","title":"User guide: Simple pattern-matching authorization policies","text":"

    Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.

    Authorino capabilities featured in this guide:
    • Authorization \u2192 Pattern-matching authorization
    • Identity verification & authentication \u2192 JWT verification

    Authorino provides a built-in authorization module to check simple pattern-matching rules against the Authorization JSON. This is an alternative to OPA when all you want is to check for some simple rules, without complex logics, such as match the value of a JWT claim.

    Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    The email-verified-only authorization policy ensures that users consuming the API from a given network (IP range 192.168.1.0/24) must have their emails verified.

    The email_verified claim is a property of the identity added to the JWT by the OpenID Connect issuer.

    The implementation relies on the X-Forwarded-For HTTP header to read the client's IP address.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  authorization:\n    \"email-verified-only\":\n      when:\n      - predicate: |\n          request.headers['x-forwarded-for'].split(',')[0].matches(\"^192\\\\\\.168\\\\\\.1\\\\\\.\\\\\\d+$\")\n      patternMatching:\n        patterns:\n        - predicate: auth.identity.email_verified\nEOF\n

    Check out the doc about using Common Expression Language (CEL) for reading from the Authorization JSON. Check out as well the common feature Conditions about skipping parts of an AuthConfig in the auth pipeline based on context.

    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-jane-email-verified","title":"Obtain an access token and consume the API as Jane (email verified)","text":"

    Obtain an access token with the Keycloak server for Jane:

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    As Jane, consume the API outside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    As Jane, consume the API inside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-peter-email-not-verified","title":"Obtain an access token and consume the API as Peter (email NOT verified)","text":"

    Obtain an access token with the Keycloak server for Peter:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    As Peter, consume the API outside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    As Peter, consume the API inside the area where the policy applies:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
    "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete namespace keycloak\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/","title":"User guide: Authorization with Keycloak Authorization Services","text":"

    Keycloak provides a powerful set of tools (REST endpoints and administrative UIs), also known as Keycloak Authorization Services, to manage and enforce authorization workflows for multiple access control mechanisms, including discretionary user access control and user-managed permissions.

    This user guide is an example of how to use Authorino as an adapter to Keycloak Authorization Services while still relying on the reverse-proxy integration pattern, thus not involving importing an authorization library nor rebuilding the application's code.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 JWT verification
    • Authorization \u2192 Open Policy Agent (OPA) Rego policies

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Keycloak server
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    Forward local requests to Keycloak running inside the cluster (if using Kind):

    kubectl -n keycloak port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    In this example, Authorino will accept access tokens (JWTs) issued by the Keycloak server. These JWTs can be either normal Keycloak ID tokens or Requesting Party Tokens (RPT).

    RPTs include claims about the permissions of the user regarding protected resources and scopes associated with a Keycloak authorization client that the user can access.

    When the supplied access token is an RPT, Authorino will just validate whether the user's granted permissions present in the token include the requested resource ID (translated from the path) and scope (inferred from the HTTP method). If the token does not contain a permissions claim (i.e. it is not an RPT), Authorino will negotiate a User-Managed Access (UMA) ticket on behalf of the user and try to obtain an RPT on that UMA ticket.

    In cases of asynchronous user-managed permission control, the first request to the API using a normal Keycloak ID token is denied by Authorino. The user that owns the resource acknowledges the access request in the Keycloak UI. If access is granted, the new permissions will be reflected in subsequent RPTs obtained by Authorino on behalf of the requesting party.

    Whenever an RPT with proper permissions is obtained by Authorino, the RPT is supplied back to the API consumer, so it can be used in subsequent requests thus skipping new negotiations of UMA tickets.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  authorization:\n    \"uma\":\n      opa:\n        rego: |\n          pat := http.send({\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\": \"post\",\"headers\":{\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":\"grant_type=client_credentials\"}).body.access_token\n          resource_id := http.send({\"url\":concat(\"\",[\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/resource_set?uri=\",input.context.request.http.path]),\"method\":\"get\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat])}}).body[0]\n          scope := lower(input.context.request.http.method)\n          access_token := trim_prefix(input.context.request.http.headers.authorization, \"Bearer \")\n\n          default rpt = \"\"\n          rpt = access_token { object.get(input.auth.identity, \"authorization\", {}).permissions }\n          else = rpt_str {\n            ticket := http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/permission\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat]),\"Content-Type\":\"application/json\"},\"raw_body\":concat(\"\",[\"[{\\\"resource_id\\\":\\\"\",resource_id,\"\\\",\\\"resource_scopes\\\":[\\\"\",scope,\"\\\"]}]\"])}).body.ticket\n            rpt_str := object.get(http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer 
\",access_token]),\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":concat(\"\",[\"grant_type=urn:ietf:params:oauth:grant-type:uma-ticket&ticket=\",ticket,\"&submit_request=true\"])}).body, \"access_token\", \"\")\n          }\n\n          allow {\n            permissions := object.get(io.jwt.decode(rpt)[1], \"authorization\", { \"permissions\": [] }).permissions\n            permissions[i]\n            permissions[i].rsid = resource_id\n            permissions[i].scopes[_] = scope\n          }\n        allValues: true\n  response:\n    success:\n      headers:\n        \"x-keycloak\":\n          when:\n\n          - selector: auth.identity.authorization.permissions\n            operator: eq\n            value: \"\"\n          json:\n            properties:\n              \"rpt\":\n                selector: auth.authorization.uma.rpt\nEOF\n
    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for user Jane:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#consume-the-api","title":"\u277c Consume the API","text":"

    As Jane, try to send a GET request to the protected resource /greetings/1, owned by user John.

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n

    As John, log in to http://localhost:8080/realms/kuadrant/account in the web browser (username: john / password: p), and grant access to the resource greeting-1 for Jane. A pending permission request by Jane shall exist in the list of John's Resources.

    As Jane, try to consume the protected resource /greetings/1 again:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n#\n# {\u2026\n#   \"headers\": {\u2026\n#     \"X-Keycloak\": \"{\\\"rpt\\\":\\\"<RPT>\", \u2026\n

    Copy the RPT from the response and repeat the request now using the RPT to authenticate:

    curl -H \"Authorization: Bearer <RPT>\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/","title":"User guide: Kubernetes RBAC for service authorization (SubjectAccessReview API)","text":"

    Manage permissions in the Kubernetes RBAC and let Authorino check them at request time with the authorization system of the cluster.

    Authorino capabilities featured in this guide:
    • Authorization \u2192 Kubernetes SubjectAccessReview
    • Identity verification & authentication \u2192 Kubernetes TokenReview

    Authorino can delegate authorization decision to the Kubernetes authorization system, allowing permissions to be stored and managed using the Kubernetes Role-Based Access Control (RBAC) for example. The feature is based on the SubjectAccessReview API and can be used for resourceAttributes (parameters defined in the AuthConfig) or nonResourceAttributes (inferring HTTP path and verb from the original request).

    Check out as well the user guide about Authentication with Kubernetes tokens (TokenReview API).

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create TokenRequests (to consume the protected service from outside the cluster)
    • jq

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    The AuthConfig below sets all Kubernetes service accounts as trusted users of the API, and relies on the Kubernetes RBAC to enforce authorization using Kubernetes SubjectAccessReview API for non-resource endpoints:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  - envoy.default.svc.cluster.local\n  authentication:\n    \"service-accounts\":\n      kubernetesTokenReview:\n        audiences: [\"https://kubernetes.default.svc.cluster.local\"]\n  authorization:\n    \"k8s-rbac\":\n      kubernetesSubjectAccessReview:\n        user:\n          selector: auth.identity.user.username\nEOF\n

    Check out the spec for the Authorino Kubernetes SubjectAccessReview authorization feature, for resource attributes permission checks where SubjectAccessReviews issued by Authorino are modeled in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb).

    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-roles-associated-with-endpoints-of-the-api","title":"\u277b Create roles associated with endpoints of the API","text":"

    Because the k8s-rbac policy defined in the AuthConfig in the previous step is for non-resource access review requests, the corresponding roles and role bindings have to be defined at cluster scope.

    Create a talker-api-greeter role whose users and service accounts bound to this role can consume the non-resource endpoints POST /hello and POST /hi of the API:

    kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: talker-api-greeter\nrules:\n\n- nonResourceURLs: [\"/hello\"]\n  verbs: [\"post\"]\n- nonResourceURLs: [\"/hi\"]\n  verbs: [\"post\"]\nEOF\n

    Create a talker-api-speaker role whose users and service accounts bound to this role can consume the non-resource endpoints POST /say/* of the API:

    kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: talker-api-speaker\nrules:\n\n- nonResourceURLs: [\"/say/*\"]\n  verbs: [\"post\"]\nEOF\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-the-serviceaccounts-and-permissions-to-consume-the-api","title":"\u277c Create the ServiceAccounts and permissions to consume the API","text":"

    Create service accounts api-consumer-1 and api-consumer-2:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: api-consumer-1\nEOF\n
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: api-consumer-2\nEOF\n

    Bind both service accounts to the talker-api-greeter role:

    kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: talker-api-greeter-rolebinding\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: talker-api-greeter\nsubjects:\n\n- kind: ServiceAccount\n  name: api-consumer-1\n  namespace: default\n- kind: ServiceAccount\n  name: api-consumer-2\n  namespace: default\nEOF\n

    Bind service account api-consumer-1 to the talker-api-speaker role:

    kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: talker-api-speaker-rolebinding\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: talker-api-speaker\nsubjects:\n\n- kind: ServiceAccount\n  name: api-consumer-1\n  namespace: default\nEOF\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#consume-the-api","title":"\u277d Consume the API","text":"

    Run a pod that consumes one of the greeting endpoints of the API from inside the cluster, as service account api-consumer-1, bound to the talker-api-greeter and talker-api-speaker cluster roles in the Kubernetes RBAC:

    kubectl run greeter --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n  \"apiVersion\": \"v1\",\n  \"spec\": {\n    \"containers\": [{\n      \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n      \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/hi\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n      \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n    }],\n    \"serviceAccountName\": \"api-consumer-1\",\n    \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n  }\n}' -- sh\n# Sending...\n# 200\n

    Run a pod that sends a POST request to /say/blah from within the cluster, as service account api-consumer-1:

    kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n  \"apiVersion\": \"v1\",\n  \"spec\": {\n    \"containers\": [{\n      \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n      \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n      \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n    }],\n    \"serviceAccountName\": \"api-consumer-1\",\n    \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n  }\n}' -- sh\n# Sending...\n# 200\n

    Run a pod that sends a POST request to /say/blah from within the cluster, as service account api-consumer-2, bound only to the talker-api-greeter cluster role in the Kubernetes RBAC:

    kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n  \"apiVersion\": \"v1\",\n  \"spec\": {\n    \"containers\": [{\n      \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n      \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n      \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n    }],\n    \"serviceAccountName\": \"api-consumer-2\",\n    \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n  }\n}' -- sh\n# Sending...\n# 403\n
    Extra: consume the API as service account api-consumer-2 from outside the cluster

    Obtain a short-lived access token for service account api-consumer-2, bound to the talker-api-greeter cluster role in the Kubernetes RBAC, using the Kubernetes TokenRequest API:

    export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-2/token -f - | jq -r .status.token)\n

    Consume the API as api-consumer-2 from outside the cluster:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/say/something -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete serviceaccount/api-consumer-1\nkubectl delete serviceaccount/api-consumer-2\nkubectl delete clusterrolebinding/talker-api-greeter-rolebinding\nkubectl delete clusterrolebinding/talker-api-speaker-rolebinding\nkubectl delete clusterrole/talker-api-greeter\nkubectl delete clusterrole/talker-api-speaker\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/","title":"User guide: Authentication with Kubernetes tokens (TokenReview API)","text":"

    Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 Kubernetes TokenReview

    Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).

    These tokens can be either ServiceAccount tokens or any valid user access tokens issued to users of the Kubernetes server API.

    The audiences claim of the token must include the requested host and port of the protected API (default), or all audiences specified in the kubernetesTokenReview.audiences field of the corresponding authentication rule in the AuthConfig.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create TokenRequests (to consume the protected service from outside the cluster)
    • jq

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  - envoy.default.svc.cluster.local\n  authentication:\n    \"authorized-service-accounts\":\n      kubernetesTokenReview:\n        audiences:\n        - talker-api\nEOF\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-protected-by-authorino","title":"\u277b Consume the API protected by Authorino","text":""},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-a-serviceaccount","title":"Create a ServiceAccount","text":"

    Create a Kubernetes ServiceAccount to identify the consumer application that will send requests to the protected API:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: api-consumer-1\nEOF\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-outside-the-cluster","title":"Consume the API from outside the cluster","text":"

    Obtain a short-lived access token for the api-consumer-1 service account:

    export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"audiences\": [\"talker-api\"], \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-1/token -f - | jq -r .status.token)\n

    Consume the API with a valid Kubernetes token:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n

    Consume the API again after the Kubernetes token has expired (10 minutes):

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"authorized-service-accounts\"\n# x-ext-auth-reason: Not authenticated\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-inside-the-cluster","title":"Consume the API from inside the cluster","text":"

    Deploy an application that consumes an endpoint of the Talker API, in a loop, every 10 seconds. The application uses a short-lived service account token mounted inside the container using Kubernetes Service Account Token Volume Projection to authenticate.

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Pod\nmetadata:\n  name: api-consumer\nspec:\n  containers:\n\n  - name: api-consumer\n    image: quay.io/kuadrant/authorino-examples:api-consumer\n    command: [\"./run\"]\n    args:\n      - --endpoint=http://envoy.default.svc.cluster.local:8000/hello\n      - --token-path=/var/run/secrets/tokens/api-token\n      - --interval=10\n    volumeMounts:\n    - mountPath: /var/run/secrets/tokens\n      name: talker-api-access-token\n  serviceAccountName: api-consumer-1\n  volumes:\n  - name: talker-api-access-token\n    projected:\n      sources:\n      - serviceAccountToken:\n          path: api-token\n          expirationSeconds: 7200\n          audience: talker-api\nEOF\n

    Check the logs of api-consumer:

    kubectl logs -f api-consumer\n# Sending...\n# 200\n# 200\n# 200\n# 200\n# ...\n
    "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete pod/api-consumer\nkubectl delete serviceaccount/api-consumer-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/mtls-authentication/","title":"User guide: Authentication with X.509 certificates and Mutual Transport Layer Security (mTLS)","text":"

    Verify client X.509 certificates against trusted root CAs stored in Kubernetes Secrets to authenticate access to APIs protected with Authorino.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 X.509 client certificate authentication
    • Authorization \u2192 Pattern-matching authorization

    Authorino can verify X.509 certificates presented by clients for authentication on requests to the protected APIs, at the application level.

    Trusted root Certificate Authorities (CA) are stored as Kubernetes kubernetes.io/tls Secrets labeled according to selectors specified in the AuthConfig, watched and cached by Authorino.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/mtls-authentication/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

    At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/mtls-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following commands will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS enabled3.

    Create the TLS certificates for the Authorino service:

    curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/default/g\" | kubectl apply -f -\n

    Request the Authorino instance:

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#create-a-ca","title":"\u2779 Create a CA","text":"

    Create a CA (Certificate Authority) certificate to issue the client certificates that will be used to authenticate clients that send requests to the Talker API:

    openssl req -x509 -sha512 -nodes \\\n  -days 365 \\\n  -newkey rsa:4096 \\\n  -subj \"/CN=talker-api-ca\" \\\n  -addext basicConstraints=CA:TRUE \\\n  -addext keyUsage=digitalSignature,keyCertSign \\\n  -keyout /tmp/ca.key \\\n  -out /tmp/ca.crt\n

    Store the CA cert in a Kubernetes Secret, labeled to be discovered by Authorino and to be mounted in the file system of the Envoy container:

    kubectl create secret tls talker-api-ca --cert=/tmp/ca.crt --key=/tmp/ca.key\nkubectl label secret talker-api-ca authorino.kuadrant.io/managed-by=authorino app=talker-api\n

    Prepare an extension file for the client certificate signing requests:

    cat > /tmp/x509v3.ext << EOF\nauthorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nkeyUsage=digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment\nextendedKeyUsage=clientAuth\nEOF\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#setup-envoy","title":"\u277a Setup Envoy","text":"

    The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  labels:\n    app: envoy\n  name: envoy\ndata:\n  envoy.yaml: |\n    static_resources:\n      listeners:\n\n      - address:\n          socket_address:\n            address: 0.0.0.0\n            port_value: 8443\n        filter_chains:\n        - transport_socket:\n            name: envoy.transport_sockets.tls\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n              common_tls_context:\n                tls_certificates:\n                - certificate_chain: {filename: \"/etc/ssl/certs/talker-api/tls.crt\"}\n                  private_key: {filename: \"/etc/ssl/certs/talker-api/tls.key\"}\n                validation_context:\n                  trusted_ca:\n                    filename: /etc/ssl/certs/talker-api/tls.crt\n          filters:\n          - name: envoy.http_connection_manager\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n              stat_prefix: local\n              route_config:\n                name: local_route\n                virtual_hosts:\n                - name: local_service\n                  domains: ['*']\n                  routes:\n                  - match: { prefix: / }\n                    route: { cluster: talker-api }\n              http_filters:\n              - name: envoy.filters.http.ext_authz\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n                  transport_api_version: V3\n                  failure_mode_allow: false\n                  include_peer_certificate: true\n                  grpc_service:\n                    envoy_grpc: { cluster_name: authorino }\n                    timeout: 1s\n              - name: envoy.filters.http.router\n                
typed_config: {}\n              use_remote_address: true\n      clusters:\n      - name: authorino\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        http2_protocol_options: {}\n        load_assignment:\n          cluster_name: authorino\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: authorino-authorino-authorization\n                    port_value: 50051\n        transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n            common_tls_context:\n              validation_context:\n                trusted_ca:\n                  filename: /etc/ssl/certs/authorino-ca-cert.crt\n      - name: talker-api\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        load_assignment:\n          cluster_name: talker-api\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: talker-api\n                    port_value: 3000\n    admin:\n      access_log_path: \"/tmp/admin_access.log\"\n      address:\n        socket_address:\n          address: 0.0.0.0\n          port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: envoy\n  name: envoy\nspec:\n  selector:\n    matchLabels:\n      app: envoy\n  template:\n    metadata:\n      labels:\n        app: envoy\n    spec:\n      containers:\n      - args:\n        - --config-path /usr/local/etc/envoy/envoy.yaml\n        - --service-cluster front-proxy\n        - --log-level info\n        - --component-log-level filter:trace,http:debug,router:debug\n        command:\n        - /usr/local/bin/envoy\n        image: envoyproxy/envoy:v1.19-latest\n      
  name: envoy\n        ports:\n        - containerPort: 8443\n          name: web\n        - containerPort: 8001\n          name: admin\n        volumeMounts:\n        - mountPath: /usr/local/etc/envoy\n          name: config\n          readOnly: true\n        - mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n          name: authorino-ca-cert\n          readOnly: true\n          subPath: ca.crt\n        - mountPath: /etc/ssl/certs/talker-api\n          name: talker-api-ca\n          readOnly: true\n      volumes:\n      - configMap:\n          items:\n          - key: envoy.yaml\n            path: envoy.yaml\n          name: envoy\n        name: config\n      - name: authorino-ca-cert\n        secret:\n          defaultMode: 420\n          secretName: authorino-ca-cert\n      - name: talker-api-ca\n        secret:\n          defaultMode: 420\n          secretName: talker-api-ca\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: envoy\nspec:\n  selector:\n    app: envoy\n  ports:\n  - name: web\n    port: 8443\n    protocol: TCP\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-wildcard-host\nspec:\n  rules:\n  - host: talker-api.127.0.0.1.nip.io\n    http:\n      paths:\n      - backend:\n          service:\n            name: envoy\n            port: { number: 8443 }\n        path: /\n        pathType: Prefix\nEOF\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8443 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8443:8443 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#create-the-authconfig","title":"\u277b Create the AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"mtls\":\n      x509:\n        selector:\n          matchLabels:\n            app: talker-api\n  authorization:\n    \"acme\":\n      patternMatching:\n        patterns:\n        - selector: auth.identity.Organization\n          operator: incl\n          value: ACME Inc.\nEOF\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

    With a TLS certificate signed by the trusted CA:

    openssl genrsa -out /tmp/aisha.key 4096\nopenssl req -new -subj \"/CN=aisha/C=PK/L=Islamabad/O=ACME Inc./OU=Engineering\" -key /tmp/aisha.key -out /tmp/aisha.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/aisha.csr -out /tmp/aisha.crt\n\ncurl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 200 OK\n

    With a TLS certificate signed by the trusted CA, though missing an authorized Organization:

    openssl genrsa -out /tmp/john.key 4096\nopenssl req -new -subj \"/CN=john/C=UK/L=London\" -key /tmp/john.key -out /tmp/john.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/john.csr -out /tmp/john.crt\n\ncurl -k --cert /tmp/john.crt --key /tmp/john.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#try-the-authconfig-via-raw-http-authorization-interface","title":"\u277d Try the AuthConfig via raw HTTP authorization interface","text":"

    Expose Authorino's raw HTTP authorization to the local host:

    kubectl port-forward service/authorino-authorino-authorization 5001:5001 2>&1 >/dev/null &\n

    With a TLS certificate signed by the trusted CA:

    curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 200\n

    With a TLS certificate signed by an unknown authority:

    openssl req -x509 -sha512 -nodes \\\n  -days 365 \\\n  -newkey rsa:4096 \\\n  -subj \"/CN=untrusted\" \\\n  -addext basicConstraints=CA:TRUE \\\n  -addext keyUsage=digitalSignature,keyCertSign \\\n  -keyout /tmp/untrusted-ca.key \\\n  -out /tmp/untrusted-ca.crt\n\nopenssl genrsa -out /tmp/niko.key 4096\nopenssl req -new -subj \"/CN=niko/C=JP/L=Osaka\" -key /tmp/niko.key -out /tmp/niko.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/untrusted-ca.crt -CAkey /tmp/untrusted-ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/niko.csr -out /tmp/niko.crt\n\ncurl -k --cert /tmp/niko.crt --key /tmp/niko.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 401\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#revoke-an-entire-chain-of-certificates","title":"\u277e Revoke an entire chain of certificates","text":"
    kubectl delete secret/talker-api-ca\n

    Even if the deleted root certificate is still cached and accepted at the gateway, Authorino will revoke access at application level immediately.

    Try with a previously accepted certificate:

    curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
    "},{"location":"authorino/docs/user-guides/mtls-authentication/#cleanup","title":"Cleanup","text":"
    kind delete cluster --name authorino-tutorial\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/","title":"User guide: OAuth 2.0 token introspection (RFC 7662)","text":"

    Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation in request-time.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 OAuth 2.0 introspection
    • Authorization \u2192 Pattern-matching authorization

    Authorino can perform OAuth 2.0 token introspection (RFC 7662) on the access tokens supplied in the requests to protected APIs. This is particularly useful when using opaque tokens, for remote checking the token validity and resolving the identity object.

    Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.

    Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • OAuth 2.0 server that implements the token introspection endpoint (RFC 7662) (e.g. Keycloak or a12n-server)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy a Keycloak server preloaded with the realm settings required for this guide:

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    Deploy an a12n-server server preloaded with all settings required for this guide:

    kubectl create namespace a12n-server\nkubectl -n a12n-server apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create the required secrets that will be used by Authorino to authenticate with Keycloak and a12n-server during the introspection request:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: oauth2-token-introspection-credentials-keycloak\nstringData:\n  clientID: talker-api\n  clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: oauth2-token-introspection-credentials-a12n-server\nstringData:\n  clientID: talker-api\n  clientSecret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g\ntype: Opaque\nEOF\n

    Create the Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak\":\n      oauth2Introspection:\n        endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token/introspect\n        tokenTypeHint: requesting_party_token\n        credentialsRef:\n          name: oauth2-token-introspection-credentials-keycloak\n    \"a12n-server\":\n      oauth2Introspection:\n        endpoint: http://a12n-server.a12n-server.svc.cluster.local:8531/introspect\n        credentialsRef:\n          name: oauth2-token-introspection-credentials-a12n-server\n  authorization:\n    \"can-read\":\n      when:\n      - selector: auth.identity.privileges\n        operator: neq\n        value: \"\"\n      patternMatching:\n        patterns:\n        - selector: auth.identity.privileges.talker-api\n          operator: incl\n          value: read\nEOF\n

    On every request, Authorino will try to verify the token remotely with the Keycloak server and the a12n-server server.

    For authorization, whenever the introspected token data includes a privileges property (returned by a12n-server), Authorino will enforce only consumers whose privileges.talker-api includes the \"read\" permission are granted access.

    Check out the docs for information about the common feature Conditions about skipping parts of an AuthConfig in the auth pipeline based on context.

    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-keycloak-and-consume-the-api","title":"Obtain an access token with Keycloak and consume the API","text":"

    Obtain an access token with the Keycloak server for user Jane:

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

    export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    As user Jane, consume the API:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    Revoke the access token and try to consume the API again:

    kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-a12n-server-and-consume-the-api","title":"Obtain an access token with a12n-server and consume the API","text":"

    Forward local requests to a12n-server instance running in the cluster:

    kubectl -n a12n-server port-forward deployment/a12n-server 8531:8531 2>&1 >/dev/null &\n

    Obtain an access token with the a12n-server server for service account service-account-1:

    ACCESS_TOKEN=$(curl -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/token\" | jq -r .access_token)\n

    You can as well obtain an access token from within the cluster, in case your a12n-server is not reachable from the outside:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://a12n-server.a12n-server.svc.cluster.local:8531/token -s -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s | jq -r .access_token)\n

    Verify the issued token is an opaque access token in this case:

    echo $ACCESS_TOKEN\n

    As service-account-1, consume the API with a valid access token:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    Revoke the access token and try to consume the API again:

    curl -d \"token=$ACCESS_TOKEN\" -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/revoke\" -i\n
    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#consume-the-api-with-a-missing-or-invalid-access-token","title":"Consume the API with a missing or invalid access token","text":"
    curl -H \"Authorization: Bearer invalid\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
    "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete secret/oauth2-token-introspection-credentials-keycloak\nkubectl delete secret/oauth2-token-introspection-credentials-a12n-server\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\nkubectl delete namespace a12n-server\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/observability/","title":"Observability","text":""},{"location":"authorino/docs/user-guides/observability/#metrics","title":"Metrics","text":"

    Authorino exports metrics at 2 endpoints:

    /metrics Metrics of the controller-runtime about reconciliation (caching) of AuthConfigs and API key Secrets /server-metrics Metrics of the external authorization gRPC and OIDC/Festival Wristband validation built-in HTTP servers

    The Authorino Operator creates a Kubernetes Service named <authorino-cr-name>-controller-metrics that exposes the endpoints on port 8080. The Authorino instance allows you to modify the port number of the metrics endpoints by setting the --metrics-addr command-line flag (default: :8080).

    Main metrics exported by endpoint1:

    Endpoint: /metrics Metric name Description\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 Labels Type controller_runtime_reconcile_total Total number of reconciliations per controller controller=authconfig|secret, result=success|error|requeue counter controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller controller=authconfig|secret counter controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller controller=authconfig|secret histogram controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller controller=authconfig|secret gauge workqueue_adds_total Total number of adds handled by workqueue name=authconfig|secret counter workqueue_depth Current depth of workqueue name=authconfig|secret gauge workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested name=authconfig|secret histogram workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running. name=authconfig|secret gauge workqueue_retries_total Total number of retries handled by workqueue name=authconfig|secret counter workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. name=authconfig|secret gauge workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes. 
name=authconfig|secret histogram rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host. code=200|404, method=GET|PUT|POST counter Endpoint: /server-metrics Metric name Description Labels Type auth_server_evaluator_total2 Total number of evaluations of individual authconfig rule performed by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_cancelled2 Number of evaluations of individual authconfig rule cancelled by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_ignored2 Number of evaluations of individual authconfig rule ignored by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_denied2 Number of denials from individual authconfig rule evaluated by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_duration_seconds2 Response latency of individual authconfig rule evaluated by the auth server (in seconds). namespace, authconfig, evaluator_type, evaluator_name histogram auth_server_authconfig_total Total number of authconfigs enforced by the auth server, partitioned by authconfig. namespace, authconfig counter auth_server_authconfig_response_status Response status of authconfigs sent by the auth server, partitioned by authconfig. namespace, authconfig, status=OK|UNAUTHENTICATED,PERMISSION_DENIED counter auth_server_authconfig_duration_seconds Response latency of authconfig enforced by the auth server (in seconds). namespace, authconfig histogram auth_server_response_status Response status of authconfigs sent by the auth server. status=OK|UNAUTHENTICATED,PERMISSION_DENIED|NOT_FOUND counter grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure. 
grpc_code=OK|Aborted|Canceled|DeadlineExceeded|Internal|ResourceExhausted|Unknown, grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter grpc_server_handling_seconds Response latency (seconds) of gRPC that had been application-level handled by the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization histogram grpc_server_msg_received_total Total number of RPC stream messages received on the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter grpc_server_started_total Total number of RPCs started on the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter http_server_handled_total Total number of calls completed on the raw HTTP authorization server, regardless of success or failure. http_code counter http_server_handling_seconds Response latency (seconds) of raw HTTP authorization request that had been application-level handled by the server. histogram oidc_server_requests_total Number of get requests received on the OIDC (Festival Wristband) server. namespace, authconfig, wristband, path=oidc-config|jwks counter oidc_server_response_status Status of HTTP response sent by the OIDC (Festival Wristband) server. status=200|404 counter

    1 Both endpoints export metrics about the Go runtime, such as number of goroutines (go_goroutines) and threads (go_threads), usage of CPU, memory and GC stats.

    2 Opt-in metrics: auth_server_evaluator_* metrics require authconfig.spec.(identity|metadata|authorization|response).metrics: true (default: false). This can be enforced for the entire instance (all AuthConfigs and evaluators), by setting the --deep-metrics-enabled command-line flag in the Authorino deployment.

    Example of metrics exported at the /metrics endpoint
    # HELP controller_runtime_active_workers Number of currently used workers per controller\n# TYPE controller_runtime_active_workers gauge\ncontroller_runtime_active_workers{controller=\"authconfig\"} 0\ncontroller_runtime_active_workers{controller=\"secret\"} 0\n# HELP controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller\n# TYPE controller_runtime_max_concurrent_reconciles gauge\ncontroller_runtime_max_concurrent_reconciles{controller=\"authconfig\"} 1\ncontroller_runtime_max_concurrent_reconciles{controller=\"secret\"} 1\n# HELP controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller\n# TYPE controller_runtime_reconcile_errors_total counter\ncontroller_runtime_reconcile_errors_total{controller=\"authconfig\"} 12\ncontroller_runtime_reconcile_errors_total{controller=\"secret\"} 0\n# HELP controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller\n# TYPE controller_runtime_reconcile_time_seconds histogram\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.005\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.01\"} 11\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.025\"} 17\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.05\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.1\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.15\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.2\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.25\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.3\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.35\"} 
18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.4\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.45\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.6\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.7\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.8\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.9\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.25\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.75\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"2\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"2.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"3\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"3.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"4\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"4.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"5\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"6\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"7\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"8\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"9\"} 
19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"10\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"15\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"20\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"25\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"30\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"40\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"50\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"60\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"+Inf\"} 19\ncontroller_runtime_reconcile_time_seconds_sum{controller=\"authconfig\"} 5.171108321999999\ncontroller_runtime_reconcile_time_seconds_count{controller=\"authconfig\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.005\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.01\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.025\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.05\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.1\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.15\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.2\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.3\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.35\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.4\"} 
1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.45\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.6\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.7\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.8\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.9\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.75\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"2\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"2.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"3\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"3.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"4\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"4.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"6\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"7\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"8\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"9\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"10\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"15\"} 
1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"20\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"30\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"40\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"50\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"60\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"+Inf\"} 1\ncontroller_runtime_reconcile_time_seconds_sum{controller=\"secret\"} 0.000138025\ncontroller_runtime_reconcile_time_seconds_count{controller=\"secret\"} 1\n# HELP controller_runtime_reconcile_total Total number of reconciliations per controller\n# TYPE controller_runtime_reconcile_total counter\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"error\"} 12\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"requeue\"} 0\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"requeue_after\"} 0\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"success\"} 7\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"error\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"requeue\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"requeue_after\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"success\"} 1\n# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n# TYPE go_gc_cycles_automatic_gc_cycles_total counter\ngo_gc_cycles_automatic_gc_cycles_total 13\n# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n# TYPE go_gc_cycles_forced_gc_cycles_total counter\ngo_gc_cycles_forced_gc_cycles_total 0\n# HELP go_gc_cycles_total_gc_cycles_total 
Count of all completed GC cycles.\n# TYPE go_gc_cycles_total_gc_cycles_total counter\ngo_gc_cycles_total_gc_cycles_total 13\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.5971e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.69e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000140699\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000313162\ngo_gc_duration_seconds{quantile=\"1\"} 0.001692423\ngo_gc_duration_seconds_sum 0.003671076\ngo_gc_duration_seconds_count 13\n# HELP go_gc_heap_allocs_by_size_bytes_total Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_by_size_bytes_total histogram\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 6357\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 45065\n[...]\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 128306\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"+Inf\"} 128327\ngo_gc_heap_allocs_by_size_bytes_total_sum 1.5021512e+07\ngo_gc_heap_allocs_by_size_bytes_total_count 128327\n# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application.\n# TYPE go_gc_heap_allocs_bytes_total counter\ngo_gc_heap_allocs_bytes_total 1.5021512e+07\n# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_objects_total counter\ngo_gc_heap_allocs_objects_total 128327\n# HELP go_gc_heap_frees_by_size_bytes_total Distribution of freed heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_by_size_bytes_total histogram\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 3885\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 33418\n[...]\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 96417\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"+Inf\"} 96425\ngo_gc_heap_frees_by_size_bytes_total_sum 9.880944e+06\ngo_gc_heap_frees_by_size_bytes_total_count 96425\n# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector.\n# TYPE go_gc_heap_frees_bytes_total counter\ngo_gc_heap_frees_bytes_total 9.880944e+06\n# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_objects_total counter\ngo_gc_heap_frees_objects_total 96425\n# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle.\n# TYPE go_gc_heap_goal_bytes gauge\ngo_gc_heap_goal_bytes 9.356624e+06\n# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory.\n# TYPE go_gc_heap_objects_objects gauge\ngo_gc_heap_objects_objects 31902\n# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. 
Each block is already accounted for in allocs-by-size and frees-by-size.\n# TYPE go_gc_heap_tiny_allocs_objects_total counter\ngo_gc_heap_tiny_allocs_objects_total 11750\n# HELP go_gc_pauses_seconds_total Distribution individual GC-related stop-the-world pause latencies.\n# TYPE go_gc_pauses_seconds_total histogram\ngo_gc_pauses_seconds_total_bucket{le=\"9.999999999999999e-10\"} 0\ngo_gc_pauses_seconds_total_bucket{le=\"1.9999999999999997e-09\"} 0\n[...]\ngo_gc_pauses_seconds_total_bucket{le=\"206708.18602188796\"} 26\ngo_gc_pauses_seconds_total_bucket{le=\"+Inf\"} 26\ngo_gc_pauses_seconds_total_sum 0.003151488\ngo_gc_pauses_seconds_total_count 26\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 80\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.18.7\"} 1\n# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory.\n# TYPE go_memory_classes_heap_free_bytes gauge\ngo_memory_classes_heap_free_bytes 589824\n# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector.\n# TYPE go_memory_classes_heap_objects_bytes gauge\ngo_memory_classes_heap_objects_bytes 5.140568e+06\n# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. 
This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory.\n# TYPE go_memory_classes_heap_released_bytes gauge\ngo_memory_classes_heap_released_bytes 4.005888e+06\n# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.\n# TYPE go_memory_classes_heap_stacks_bytes gauge\ngo_memory_classes_heap_stacks_bytes 786432\n# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects.\n# TYPE go_memory_classes_heap_unused_bytes gauge\ngo_memory_classes_heap_unused_bytes 2.0602e+06\n# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use.\n# TYPE go_memory_classes_metadata_mcache_free_bytes gauge\ngo_memory_classes_metadata_mcache_free_bytes 13984\n# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used.\n# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge\ngo_memory_classes_metadata_mcache_inuse_bytes 2400\n# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use.\n# TYPE go_memory_classes_metadata_mspan_free_bytes gauge\ngo_memory_classes_metadata_mspan_free_bytes 17104\n# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used.\n# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge\ngo_memory_classes_metadata_mspan_inuse_bytes 113968\n# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata.\n# TYPE go_memory_classes_metadata_other_bytes gauge\ngo_memory_classes_metadata_other_bytes 5.544408e+06\n# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system.\n# 
TYPE go_memory_classes_os_stacks_bytes gauge\ngo_memory_classes_os_stacks_bytes 0\n# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more.\n# TYPE go_memory_classes_other_bytes gauge\ngo_memory_classes_other_bytes 537777\n# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling.\n# TYPE go_memory_classes_profiling_buckets_bytes gauge\ngo_memory_classes_profiling_buckets_bytes 1.455487e+06\n# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes.\n# TYPE go_memory_classes_total_bytes gauge\ngo_memory_classes_total_bytes 2.026804e+07\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 5.140568e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.5021512e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.455487e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 108175\n# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.\n# TYPE go_memstats_gc_cpu_fraction gauge\ngo_memstats_gc_cpu_fraction 0\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 5.544408e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# 
TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 5.140568e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 4.595712e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 7.200768e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 31902\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 4.005888e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.179648e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6461572121033354e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 140077\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 2400\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 16384\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 113968\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes 
gauge\ngo_memstats_mspan_sys_bytes 131072\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 9.356624e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 537777\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 786432\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 786432\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.026804e+07\n# HELP go_sched_goroutines_goroutines Count of live goroutines.\n# TYPE go_sched_goroutines_goroutines gauge\ngo_sched_goroutines_goroutines 80\n# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.\n# TYPE go_sched_latencies_seconds histogram\ngo_sched_latencies_seconds_bucket{le=\"9.999999999999999e-10\"} 244\ngo_sched_latencies_seconds_bucket{le=\"1.9999999999999997e-09\"} 244\n[...]\ngo_sched_latencies_seconds_bucket{le=\"206708.18602188796\"} 2336\ngo_sched_latencies_seconds_bucket{le=\"+Inf\"} 2336\ngo_sched_latencies_seconds_sum 0.18509832400000004\ngo_sched_latencies_seconds_count 2336\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 8\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.84\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE 
process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.3728896e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.\n# TYPE rest_client_requests_total counter\nrest_client_requests_total{code=\"200\",host=\"10.96.0.1:443\",method=\"GET\"} 114\nrest_client_requests_total{code=\"200\",host=\"10.96.0.1:443\",method=\"PUT\"} 4\n# HELP workqueue_adds_total Total number of adds handled by workqueue\n# TYPE workqueue_adds_total counter\nworkqueue_adds_total{name=\"authconfig\"} 19\nworkqueue_adds_total{name=\"secret\"} 1\n# HELP workqueue_depth Current depth of workqueue\n# TYPE workqueue_depth gauge\nworkqueue_depth{name=\"authconfig\"} 0\nworkqueue_depth{name=\"secret\"} 0\n# HELP workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running.\n# TYPE workqueue_longest_running_processor_seconds gauge\nworkqueue_longest_running_processor_seconds{name=\"authconfig\"} 0\nworkqueue_longest_running_processor_seconds{name=\"secret\"} 0\n# HELP workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested\n# TYPE workqueue_queue_duration_seconds histogram\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-08\"} 
0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-07\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-06\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-06\"} 8\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-05\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.001\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.01\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.1\"} 18\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1\"} 18\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"10\"} 19\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"+Inf\"} 19\nworkqueue_queue_duration_seconds_sum{name=\"authconfig\"} 4.969016371\nworkqueue_queue_duration_seconds_count{name=\"authconfig\"} 19\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-08\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-07\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-06\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-06\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-05\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.001\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.01\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.1\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"10\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"+Inf\"} 1\nworkqueue_queue_duration_seconds_sum{name=\"secret\"} 4.67e-06\nworkqueue_queue_duration_seconds_count{name=\"secret\"} 1\n# HELP workqueue_retries_total Total number of retries handled by workqueue\n# TYPE workqueue_retries_total 
counter\nworkqueue_retries_total{name=\"authconfig\"} 12\nworkqueue_retries_total{name=\"secret\"} 0\n# HELP workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases.\n# TYPE workqueue_unfinished_work_seconds gauge\nworkqueue_unfinished_work_seconds{name=\"authconfig\"} 0\nworkqueue_unfinished_work_seconds{name=\"secret\"} 0\n# HELP workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes.\n# TYPE workqueue_work_duration_seconds histogram\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-08\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-07\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-05\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.001\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.01\"} 11\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.1\"} 18\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1\"} 18\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"10\"} 19\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"+Inf\"} 19\nworkqueue_work_duration_seconds_sum{name=\"authconfig\"} 5.171738079000001\nworkqueue_work_duration_seconds_count{name=\"authconfig\"} 19\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-08\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-07\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-06\"} 
0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-05\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.001\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.01\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.1\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"10\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"+Inf\"} 1\nworkqueue_work_duration_seconds_sum{name=\"secret\"} 0.000150956\nworkqueue_work_duration_seconds_count{name=\"secret\"} 1\n
    Example of metrics exported at the /server-metrics endpoint
    # HELP auth_server_authconfig_duration_seconds Response latency of authconfig enforced by the auth server (in seconds).\n# TYPE auth_server_authconfig_duration_seconds histogram\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.001\"} 0\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.051000000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.101\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.15100000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.201\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.251\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.301\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.351\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.40099999999999997\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.45099999999999996\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.501\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.551\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.6010000000000001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.6510000000000001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.7010000000000002\"} 
1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.7510000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.8010000000000003\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.8510000000000003\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.9010000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.9510000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"+Inf\"} 1\nauth_server_authconfig_duration_seconds_sum{authconfig=\"edge-auth\",namespace=\"authorino\"} 0.001701795\nauth_server_authconfig_duration_seconds_count{authconfig=\"edge-auth\",namespace=\"authorino\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.051000000000000004\"} 4\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.101\"} 4\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.15100000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.201\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.251\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.301\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.351\"} 
5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.40099999999999997\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.45099999999999996\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.501\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.551\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.6010000000000001\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.6510000000000001\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.7010000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.7510000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.8010000000000003\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.8510000000000003\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.9010000000000004\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.9510000000000004\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"+Inf\"} 5\nauth_server_authconfig_duration_seconds_sum{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 0.26967658299999997\nauth_server_authconfig_duration_seconds_count{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 5\n# 
HELP auth_server_authconfig_response_status Response status of authconfigs sent by the auth server, partitioned by authconfig.\n# TYPE auth_server_authconfig_response_status counter\nauth_server_authconfig_response_status{authconfig=\"edge-auth\",namespace=\"authorino\",status=\"OK\"} 1\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"OK\"} 2\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"PERMISSION_DENIED\"} 2\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"UNAUTHENTICATED\"} 1\n# HELP auth_server_authconfig_total Total number of authconfigs enforced by the auth server, partitioned by authconfig.\n# TYPE auth_server_authconfig_total counter\nauth_server_authconfig_total{authconfig=\"edge-auth\",namespace=\"authorino\"} 1\nauth_server_authconfig_total{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 5\n# HELP auth_server_evaluator_duration_seconds Response latency of individual authconfig rule evaluated by the auth server (in seconds).\n# TYPE auth_server_evaluator_duration_seconds histogram\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.001\"} 0\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.051000000000000004\"} 3\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.101\"} 3\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.15100000000000002\"} 
4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.201\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.251\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.301\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.351\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.40099999999999997\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.45099999999999996\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.501\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.551\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.6010000000000001\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.6510000000000001\"} 
4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.7010000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.7510000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.8010000000000003\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.8510000000000003\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.9010000000000004\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.9510000000000004\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"+Inf\"} 4\nauth_server_evaluator_duration_seconds_sum{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 0.25800055\nauth_server_evaluator_duration_seconds_count{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 4\n# HELP auth_server_evaluator_total Total number of evaluations of individual authconfig rule performed by the auth server.\n# TYPE auth_server_evaluator_total 
counter\nauth_server_evaluator_total{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 4\n# HELP auth_server_response_status Response status of authconfigs sent by the auth server.\n# TYPE auth_server_response_status counter\nauth_server_response_status{status=\"NOT_FOUND\"} 1\nauth_server_response_status{status=\"OK\"} 3\nauth_server_response_status{status=\"PERMISSION_DENIED\"} 2\nauth_server_response_status{status=\"UNAUTHENTICATED\"} 1\n# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n# TYPE go_gc_cycles_automatic_gc_cycles_total counter\ngo_gc_cycles_automatic_gc_cycles_total 11\n# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n# TYPE go_gc_cycles_forced_gc_cycles_total counter\ngo_gc_cycles_forced_gc_cycles_total 0\n# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles.\n# TYPE go_gc_cycles_total_gc_cycles_total counter\ngo_gc_cycles_total_gc_cycles_total 11\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.5971e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.69e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000158594\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000324091\ngo_gc_duration_seconds{quantile=\"1\"} 0.001692423\ngo_gc_duration_seconds_sum 0.003546711\ngo_gc_duration_seconds_count 11\n# HELP go_gc_heap_allocs_by_size_bytes_total Distribution of heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_by_size_bytes_total histogram\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 6261\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 42477\n[...]\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 122133\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"+Inf\"} 122154\ngo_gc_heap_allocs_by_size_bytes_total_sum 1.455944e+07\ngo_gc_heap_allocs_by_size_bytes_total_count 122154\n# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application.\n# TYPE go_gc_heap_allocs_bytes_total counter\ngo_gc_heap_allocs_bytes_total 1.455944e+07\n# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_objects_total counter\ngo_gc_heap_allocs_objects_total 122154\n# HELP go_gc_heap_frees_by_size_bytes_total Distribution of freed heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_by_size_bytes_total histogram\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 3789\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 31067\n[...]\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 91013\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"+Inf\"} 91021\ngo_gc_heap_frees_by_size_bytes_total_sum 9.399936e+06\ngo_gc_heap_frees_by_size_bytes_total_count 91021\n# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector.\n# TYPE go_gc_heap_frees_bytes_total counter\ngo_gc_heap_frees_bytes_total 9.399936e+06\n# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_objects_total counter\ngo_gc_heap_frees_objects_total 91021\n# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle.\n# TYPE go_gc_heap_goal_bytes gauge\ngo_gc_heap_goal_bytes 9.601744e+06\n# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory.\n# TYPE go_gc_heap_objects_objects gauge\ngo_gc_heap_objects_objects 31133\n# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. 
Each block is already accounted for in allocs-by-size and frees-by-size.\n# TYPE go_gc_heap_tiny_allocs_objects_total counter\ngo_gc_heap_tiny_allocs_objects_total 9866\n# HELP go_gc_pauses_seconds_total Distribution individual GC-related stop-the-world pause latencies.\n# TYPE go_gc_pauses_seconds_total histogram\ngo_gc_pauses_seconds_total_bucket{le=\"9.999999999999999e-10\"} 0\ngo_gc_pauses_seconds_total_bucket{le=\"1.9999999999999997e-09\"} 0\n[...]\ngo_gc_pauses_seconds_total_bucket{le=\"206708.18602188796\"} 22\ngo_gc_pauses_seconds_total_bucket{le=\"+Inf\"} 22\ngo_gc_pauses_seconds_total_sum 0.0030393599999999996\ngo_gc_pauses_seconds_total_count 22\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 79\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.18.7\"} 1\n# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory.\n# TYPE go_memory_classes_heap_free_bytes gauge\ngo_memory_classes_heap_free_bytes 630784\n# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector.\n# TYPE go_memory_classes_heap_objects_bytes gauge\ngo_memory_classes_heap_objects_bytes 5.159504e+06\n# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. 
This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory.\n# TYPE go_memory_classes_heap_released_bytes gauge\ngo_memory_classes_heap_released_bytes 3.858432e+06\n# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.\n# TYPE go_memory_classes_heap_stacks_bytes gauge\ngo_memory_classes_heap_stacks_bytes 786432\n# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects.\n# TYPE go_memory_classes_heap_unused_bytes gauge\ngo_memory_classes_heap_unused_bytes 2.14776e+06\n# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use.\n# TYPE go_memory_classes_metadata_mcache_free_bytes gauge\ngo_memory_classes_metadata_mcache_free_bytes 13984\n# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used.\n# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge\ngo_memory_classes_metadata_mcache_inuse_bytes 2400\n# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use.\n# TYPE go_memory_classes_metadata_mspan_free_bytes gauge\ngo_memory_classes_metadata_mspan_free_bytes 16696\n# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used.\n# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge\ngo_memory_classes_metadata_mspan_inuse_bytes 114376\n# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata.\n# TYPE go_memory_classes_metadata_other_bytes gauge\ngo_memory_classes_metadata_other_bytes 5.544408e+06\n# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system.\n# 
TYPE go_memory_classes_os_stacks_bytes gauge\ngo_memory_classes_os_stacks_bytes 0\n# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more.\n# TYPE go_memory_classes_other_bytes gauge\ngo_memory_classes_other_bytes 537777\n# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling.\n# TYPE go_memory_classes_profiling_buckets_bytes gauge\ngo_memory_classes_profiling_buckets_bytes 1.455487e+06\n# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes.\n# TYPE go_memory_classes_total_bytes gauge\ngo_memory_classes_total_bytes 2.026804e+07\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 5.159504e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.455944e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.455487e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 100887\n# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.\n# TYPE go_memstats_gc_cpu_fraction gauge\ngo_memstats_gc_cpu_fraction 0\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 5.544408e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# 
TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 5.159504e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 4.489216e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 7.307264e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 31133\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 3.858432e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.179648e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6461569717723043e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 132020\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 2400\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 16384\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 114376\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes 
gauge\ngo_memstats_mspan_sys_bytes 131072\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 9.601744e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 537777\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 786432\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 786432\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.026804e+07\n# HELP go_sched_goroutines_goroutines Count of live goroutines.\n# TYPE go_sched_goroutines_goroutines gauge\ngo_sched_goroutines_goroutines 79\n# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.\n# TYPE go_sched_latencies_seconds histogram\ngo_sched_latencies_seconds_bucket{le=\"9.999999999999999e-10\"} 225\ngo_sched_latencies_seconds_bucket{le=\"1.9999999999999997e-09\"} 225\n[...]\ngo_sched_latencies_seconds_bucket{le=\"206708.18602188796\"} 1916\ngo_sched_latencies_seconds_bucket{le=\"+Inf\"} 1916\ngo_sched_latencies_seconds_sum 0.18081453600000003\ngo_sched_latencies_seconds_count 1916\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 8\n# HELP grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure.\n# TYPE grpc_server_handled_total counter\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 
0\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_handling_seconds Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.\n# TYPE grpc_server_handling_seconds histogram\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.005\"} 
3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.01\"} 3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.025\"} 3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.05\"} 6\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.1\"} 6\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.25\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"1\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"2.5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"10\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"+Inf\"} 7\ngrpc_server_handling_seconds_sum{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0.277605516\ngrpc_server_handling_seconds_count{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\n# HELP grpc_server_msg_received_total Total number of RPC stream messages received on the server.\n# TYPE 
grpc_server_msg_received_total counter\ngrpc_server_msg_received_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_msg_received_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_msg_received_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server.\n# TYPE grpc_server_msg_sent_total counter\ngrpc_server_msg_sent_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_msg_sent_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_msg_sent_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_started_total Total number of RPCs started on the server.\n# TYPE grpc_server_started_total counter\ngrpc_server_started_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_started_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_started_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP oidc_server_requests_total Number of get requests received on the OIDC (Festival Wristband) server.\n# TYPE oidc_server_requests_total counter\noidc_server_requests_total{authconfig=\"edge-auth\",namespace=\"authorino\",path=\"/.well-known/openid-configuration\",wristband=\"wristband\"} 1\noidc_server_requests_total{authconfig=\"edge-auth\",namespace=\"authorino\",path=\"/.well-known/openid-connect/certs\",wristband=\"wristband\"} 1\n# HELP oidc_server_response_status Status of HTTP response sent by the OIDC (Festival Wristband) server.\n# TYPE oidc_server_response_status 
counter\noidc_server_response_status{status=\"200\"} 2\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.42\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.370432e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.\n# TYPE promhttp_metric_handler_requests_in_flight gauge\npromhttp_metric_handler_requests_in_flight 1\n# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.\n# TYPE promhttp_metric_handler_requests_total counter\npromhttp_metric_handler_requests_total{code=\"200\"} 1\npromhttp_metric_handler_requests_total{code=\"500\"} 0\npromhttp_metric_handler_requests_total{code=\"503\"} 0\n
    "},{"location":"authorino/docs/user-guides/observability/#readiness-check","title":"Readiness check","text":"

    Authorino exposes two main endpoints for health and readiness check of the AuthConfig controller:

    • /healthz: Health probe (ping) \u2013 reports \"ok\" if the controller is healthy.
    • /readyz: Readiness probe \u2013 reports \"ok\" if the controller is ready to reconcile AuthConfig-related events.

    In general, the endpoints return either 200 (\"ok\", i.e. all checks have passed) or 500 (when one or more checks failed).

    The default binding network address is :8081, which can be changed by setting the command-line flag --health-probe-addr.

    The following additional subpath is available and its corresponding check can be aggregated into the response from the main readiness probe:

    • /readyz/authconfigs: Aggregated readiness status of the AuthConfigs \u2013 reports \"ok\" if all AuthConfigs watched by the reconciler have been marked as ready.
    Important!The AuthConfig readiness check within the scope of the aggregated readiness probe endpoint is deactivated by default \u2013 i.e. this check is an opt-in check. Sending a request to the /readyz endpoint without explicitly opting-in for the AuthConfigs check, by using the include parameter, will result in a response message that disregards the actual status of the watched AuthConfigs, possibly an \"ok\" message. To read the aggregated status of the watched AuthConfigs, either use the specific endpoint /readyz/authconfigs or opt-in for the check in the aggregated endpoint by sending a request to /readyz?include=authconfigs

    Apart from include to add the aggregated status of the AuthConfigs, the following additional query string parameters are available:

    • verbose=true|false - provides more verbose response messages;
    • exclude=(check name) \u2013 to exclude a particular readiness check (for future usage).
    "},{"location":"authorino/docs/user-guides/observability/#logging","title":"Logging","text":"

    Authorino provides either structured log messages (\"production\" mode) or more human-friendly log messages output to stdout (\"development\" mode), and different levels of logging.

    "},{"location":"authorino/docs/user-guides/observability/#log-levels-and-log-modes","title":"Log levels and log modes","text":"

    Authorino outputs 3 levels of log messages: (from lowest to highest level)

    1. debug
    2. info (default)
    3. error

    info logging is restricted to high-level information of the gRPC and HTTP authorization services, limiting messages to incoming request and respective outgoing response logs, with reduced details about the corresponding objects (request payload and authorization result), and without any further detailed logs of the steps in between, except for errors.

    Only debug logging will include processing details of each Auth Pipeline, such as intermediary requests to validate identities with external auth servers, requests to external sources of auth metadata or authorization policies.

    To configure the desired log level, set the spec.logLevel field of the Authorino custom resource (or --log-level command-line flag in the Authorino deployment), to one of the supported values listed above. Default log level is info.

    Apart from log level, Authorino can output messages to the logs in 2 different formats:

    • production (default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
    • development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}

    To configure the desired log mode, set the spec.logMode field of the Authorino custom resource (or --log-mode command-line flag in the Authorino deployment), to one of the supported values listed above. Default log mode is production.

    Example of Authorino custom resource with log level debug and log mode production:

    apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  logLevel: debug\n  logMode: production\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\n
    "},{"location":"authorino/docs/user-guides/observability/#sensitive-data-output-to-the-logs","title":"Sensitive data output to the logs","text":"

    Authorino will never output HTTP headers and query string parameters to info log messages, as such values usually include sensitive data (e.g. access tokens, API keys and Authorino Festival Wristbands). However, debug log messages may include such sensitive information and those are not redacted.

    Therefore, DO NOT USE debug LOG LEVEL IN PRODUCTION! Instead, use either info or error.

    "},{"location":"authorino/docs/user-guides/observability/#log-messages-printed-by-authorino","title":"Log messages printed by Authorino","text":"

    Some log messages printed by Authorino and corresponding extra values included:

    logger level message extra values authorino info \"setting instance base logger\" min level=info\\|debug, mode=production\\|development authorino info \"booting up authorino\" version authorino debug \"setting up with options\" auth-config-label-selector, deep-metrics-enabled, enable-leader-election, evaluator-cache-size, ext-auth-grpc-port, ext-auth-http-port, health-probe-addr, log-level, log-mode, max-http-request-body-size, metrics-addr, oidc-http-port, oidc-tls-cert, oidc-tls-cert-key, secret-label-selector, timeout, tls-cert, tls-cert-key, watch-namespace authorino info \"attempting to acquire leader lease <namespace>/cb88a58a.authorino.kuadrant.io...\\n\" authorino info \"successfully acquired lease <namespace>/cb88a58a.authorino.kuadrant.io\\n\" authorino info \"disabling grpc auth service\" authorino info \"starting grpc auth service\" port, tls authorino error \"failed to obtain port for the grpc auth service\" authorino error \"failed to load tls cert for the grpc auth\" authorino error \"failed to start grpc auth service\" authorino info \"disabling http auth service\" authorino info \"starting http auth service\" port, tls authorino error \"failed to obtain port for the http auth service\" authorino error \"failed to start http auth service\" authorino info \"disabling http oidc service\" authorino info \"starting http oidc service\" port, tls authorino error \"failed to obtain port for the http oidc service\" authorino error \"failed to start http oidc service\" authorino info \"starting manager\" authorino error \"unable to start manager\" authorino error \"unable to create controller\" controller=authconfig\\|secret\\|authconfigstatusupdate authorino error \"problem running manager\" authorino info \"starting status update manager\" authorino error \"unable to start status update manager\" authorino error \"problem running status update manager\" authorino.controller-runtime.metrics info \"metrics server is starting to listen\" addr 
authorino.controller-runtime.manager info \"starting metrics server\" path authorino.controller-runtime.manager.events debug \"Normal\" object={kind=ConfigMap, apiVersion=v1}, reason=LeaderElection, message=\"authorino-controller-manager-* became leader\" authorino.controller-runtime.manager.events debug \"Normal\" object={kind=Lease, apiVersion=coordination.k8s.io/v1}, reason=LeaderElection, message=\"authorino-controller-manager-* became leader\" authorino.controller-runtime.manager.controller.authconfig info \"resource reconciled\" authconfig authorino.controller-runtime.manager.controller.authconfig info \"host already taken\" authconfig, host authorino.controller-runtime.manager.controller.authconfig.statusupdater debug \"resource status did not change\" authconfig authorino.controller-runtime.manager.controller.authconfig.statusupdater debug \"resource status changed\" authconfig, authconfig/status authorino.controller-runtime.manager.controller.authconfig.statusupdater error \"failed to update the resource\" authconfig authorino.controller-runtime.manager.controller.authconfig.statusupdater info \"resource status updated\" authconfig authorino.controller-runtime.manager.controller.secret info \"resource reconciled\" authorino.controller-runtime.manager.controller.secret info \"could not reconcile authconfigs using api key authentication\" authorino.service.oidc info \"request received\" request id, url, realm, config, path authorino.service.oidc info \"response sent\" request id authorino.service.oidc error \"failed to serve oidc request\" authorino.service.auth info \"incoming authorization request\" request id, object authorino.service.auth debug \"incoming authorization request\" request id, object authorino.service.auth info \"outgoing authorization response\" request id, authorized, response, object authorino.service.auth debug \"outgoing authorization response\" request id, authorized, response, object 
authorino.service.auth error \"failed to create dynamic metadata\" request id, object authorino.service.auth.authpipeline debug \"skipping config\" request id, config, reason authorino.service.auth.authpipeline.identity debug \"identity validated\" request id, config, object authorino.service.auth.authpipeline.identity debug \"cannot validate identity\" request id, config, reason authorino.service.auth.authpipeline.identity error \"failed to extend identity object\" request id, config, object authorino.service.auth.authpipeline.identity.oidc error \"failed to discovery openid connect configuration\" endpoint authorino.service.auth.authpipeline.identity.oidc debug \"auto-refresh of openid connect configuration disabled\" endpoint, reason authorino.service.auth.authpipeline.identity.oidc debug \"openid connect configuration updated\" endpoint authorino.service.auth.authpipeline.identity.oauth2 debug \"sending token introspection request\" request id, url, data authorino.service.auth.authpipeline.identity.kubernetesauth debug \"calling kubernetes token review api\" request id, tokenreview authorino.service.auth.authpipeline.identity.apikey error \"Something went wrong fetching the authorized credentials\" authorino.service.auth.authpipeline.metadata debug \"fetched auth metadata\" request id, config, object authorino.service.auth.authpipeline.metadata debug \"cannot fetch metadata\" request id, config, reason authorino.service.auth.authpipeline.metadata.http debug \"sending request\" request id, method, url, headers authorino.service.auth.authpipeline.metadata.userinfo debug \"fetching user info\" request id, endpoint authorino.service.auth.authpipeline.metadata.uma debug \"requesting pat\" request id, url, data, headers authorino.service.auth.authpipeline.metadata.uma debug \"querying resources by uri\" request id, url authorino.service.auth.authpipeline.metadata.uma debug \"getting resource data\" request id, url authorino.service.auth.authpipeline.authorization 
debug \"evaluating for input\" request id, input authorino.service.auth.authpipeline.authorization debug \"access granted\" request id, config, object authorino.service.auth.authpipeline.authorization debug \"access denied\" request id, config, reason authorino.service.auth.authpipeline.authorization.opa error \"invalid response from policy evaluation\" policy authorino.service.auth.authpipeline.authorization.opa error \"failed to precompile policy\" policy authorino.service.auth.authpipeline.authorization.opa error \"failed to download policy from external registry\" policy, endpoint authorino.service.auth.authpipeline.authorization.opa error \"failed to refresh policy from external registry\" policy, endpoint authorino.service.auth.authpipeline.authorization.opa debug \"external policy unchanged\" policy, endpoint authorino.service.auth.authpipeline.authorization.opa debug \"auto-refresh of external policy disabled\" policy, endpoint, reason authorino.service.auth.authpipeline.authorization.opa info \"policy updated from external registry\" policy, endpoint authorino.service.auth.authpipeline.authorization.kubernetesauthz debug \"calling kubernetes subject access review api\" request id, subjectaccessreview authorino.service.auth.authpipeline.response debug \"dynamic response built\" request id, config, object authorino.service.auth.authpipeline.response debug \"cannot build dynamic response\" request id, config, reason authorino.service.auth.http debug \"bad request\" request id authorino.service.auth.http debug \"not found\" request id authorino.service.auth.http debug \"request body too large\" request id authorino.service.auth.http debug \"service unavailable\" request id"},{"location":"authorino/docs/user-guides/observability/#examples","title":"Examples","text":"

    The examples below are all with --log-level=debug and --log-mode=production.

    Booting up the service
    {\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"setting instance base logger\",\"min level\":\"info\",\"mode\":\"production\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"build information\",\"version\":\"v1beta3\",\"commit\":\"ae2dc8150af2e6cdb35957ba7305c4c2a76d6149\",\"dirty\":\"false\",\"cmd\":\"server\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting http auth service\",\"port\":5001,\"tls\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting grpc auth service\",\"port\":50051,\"tls\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting http oidc service\",\"port\":8083,\"tls\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting reconciliation manager\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting server\",\"kind\":\"health probe\",\"addr\":\"[::]:8081\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino.controller-runtime.metrics\",\"msg\":\"Starting metrics server\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino.controller-runtime.metrics\",\"msg\":\"Serving metrics server\",\"bindAddress\":\":8080\",\"secure\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting EventSource\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\",\"source\":\"kind source: *v1beta3.AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting EventSource\",\"controller\":\"secret\",\"controllerGroup\":\"\",\"controllerKind\":\"Secret\",\"source\":\"kind source: 
*v1.Secret\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting Controller\",\"controller\":\"secret\",\"controllerGroup\":\"\",\"controllerKind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting Controller\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting status update manager\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting EventSource\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\",\"source\":\"kind source: *v1beta3.AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting Controller\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting workers\",\"controller\":\"secret\",\"controllerGroup\":\"\",\"controllerKind\":\"Secret\",\"worker count\":1}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting workers\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\",\"worker count\":1}\n
    Reconciling an AuthConfig and 2 related API key secrets
    {\"level\":\"debug\",\"ts\":1669221208.7473805,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsNotLinked\",\"message\":\"No hosts linked to the resource\"},{\"type\":\"Ready\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Unknown\"}],\"summary\":{\"ready\":false,\"hostsReady\":[],\"numHostsReady\":\"0/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7496614,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7532616,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535005,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535596,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7536132,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource 
reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221208.753772,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.753835,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsLinked\"},{\"type\":\"Ready\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Reconciled\"}],\"summary\":{\"ready\":true,\"hostsReady\":[\"talker-api.127.0.0.1.nip.io\"],\"numHostsReady\":\"1/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7571108,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7573664,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.757429,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not change\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586699,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler 
kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586884,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7586913,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n{\"level\":\"debug\",\"ts\":1669221208.7597604,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not change\",\"authconfig\":\"default/talker-api-protection\"}\n
    Enforcing an AuthConfig with authentication based on Kubernetes tokens:

    • identity: k8s-auth, oidc, oauth2, apikey
    • metadata: http, oidc userinfo
    • authorization: opa, k8s-authz
    • response: wristband
    {\"level\":\"info\",\"ts\":1634830460.1486168,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1491194,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830460.150506,\"logger\":\"authorino.service.auth.authpipeline.identity.kubernetesauth\",\"msg\":\"calling kubernetes token review api\",\"request 
id\":\"8157480586935853928\",\"tokenreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"token\":\"eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"audiences\":[\"talker-api\"]},\"status\":{\"user\":{}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1509938,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830460.1517606,\"logger\":\"authorino.service.auth.authpipeline.identity.oauth2\",\"msg\":\"sending token introspection request\",\"request 
id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"data\":\"token=eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA&token_type_hint=requesting_party_token\"}\n{\"level\":\"debug\",\"ts\":1634830460.1620777,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"}}\n{\"level\":\"debug\",\"ts\":1634830460.1622565,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting 
pat\",\"request id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.1670353,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"8157480586935853928\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.169326,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830460.1753876,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"}}\n{\"level\":\"debug\",\"ts\":1634830460.2331996,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830460.2495668,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830460.2927864,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830460.2930083,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"8157480586935853928\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"},\"uma-resource-regis
try\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830460.2955465,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request id\":\"8157480586935853928\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"system:serviceaccount:authorino:api-consumer-1\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830460.2986183,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3044975,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3052874,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3NjAsImlhdCI6MTYzNDgzMDQ2MCwiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI4NDliMDk0ZDA4MzU0ZjM0MjA4ZGI3MjBmYWZmODlmNmM3NmYyOGY3MTcxOWI4NTQ3ZDk5NWNlNzAwMjU2ZGY4In0.Jn-VB5Q_0EX1ed1ji4KvhO4DlMqZeIl5H0qlukbTyYkp-Pgb4SnPGSbYWp5_uvG8xllsFAA5nuyBIXeba-dbkw\"}\n{\"level\":\"info\",\"ts\":1634830460.3054585,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830460.305476,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n
    Enforcing an AuthConfig with authentication based on API keys

    • identity: k8s-auth, oidc, oauth2, apikey
    • metadata: http, oidc userinfo
    • authorization: opa, k8s-authz
    • response: wristband
    {\"level\":\"info\",\"ts\":1634830413.2425854,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830413.2426975,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830413.2428744,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request 
id\":\"7199257136822741594\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830413.2434332,\"logger\":\"authorino.service.auth.authpipeline\",\"msg\":\"skipping config\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"context canceled\"}\n{\"level\":\"debug\",\"ts\":1634830413.2479305,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"object\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_ke
y\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"}}\n{\"level\":\"debug\",\"ts\":1634830413.248768,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"7199257136822741594\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.2496722,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830413.2497928,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting pat\",\"request id\":\"7199257136822741594\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.258932,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"}}\n{\"level\":\"debug\",\"ts\":1634830413.2945344,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830413.3123596,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request 
id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830413.3340268,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830413.3367748,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"7199257136822741594\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY 
ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_key\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Ag
ent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"},\"uma-resource-registry\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830413.339894,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3444238,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request id\":\"7199257136822741594\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"john\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830413.3547812,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request 
id\":\"7199257136822741594\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3558292,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3MTMsImlhdCI6MTYzNDgzMDQxMywiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI5NjhiZjViZjk3MDM3NWRiNjE0ZDFhMDgzZTg2NTBhYTVhMGVhMzAyOTdiYmJjMTBlNWVlMWZmYTkxYTYwZmY4In0.7G440sWgi2TIaxrGJf5KWR9UOFpNTjwVYeaJXFLzsLhVNICoMLbYzBAEo4M3ym1jipxxTVeE7anm4qDDc7cnVQ\"}\n{\"level\":\"info\",\"ts\":1634830413.3569078,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830413.3569596,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n
    Enforcing an AuthConfig with authentication based on API keys (invalid API key)

    • identity: k8s-auth, oidc, oauth2, apikey
    • metadata: http, oidc userinfo
    • authorization: opa, k8s-authz
    • response: wristband
    {\"level\":\"info\",\"ts\":1634830373.2066543,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830373.2068064,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830373,\"nanos\":198329000},\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY invalid\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"9e391846-afe4-489a-8716-23a2e1c1aa77\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830373.2070816,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request 
id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-opaque\",\"ExtendedProperties\":[],\"OAuth2\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"TokenIntrospectionUrl\":\"http://keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"TokenTypeHint\":\"requesting_party_token\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830373.207225,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"the API Key provided is invalid\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072473,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072592,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request 
id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"info\",\"ts\":1634830373.2073083,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\"}}\n{\"level\":\"debug\",\"ts\":1634830373.2073889,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\",\"headers\":[{\"Location\":\"https://my-app.io/login\"}]}}\n
    Deleting an AuthConfig and 2 related API key secrets
    {\"level\":\"info\",\"ts\":1669221361.5032296,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221361.5057878,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n
    Shutting down the service
    {\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136683,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136883,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0137057,\"logger\":\"authorino.controller.secret\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.013724,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.01375,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.013752,\"logger\":\"authorino.controller.secret\",\"msg\":\"All workers finished\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.0137632,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.013751,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137684,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler 
kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137722,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.0138857,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0138955,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n{\"level\":\"info\",\"ts\":1669221635.0138893,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0139785,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n
    "},{"location":"authorino/docs/user-guides/observability/#tracing","title":"Tracing","text":""},{"location":"authorino/docs/user-guides/observability/#request-id","title":"Request ID","text":"

    Processes related to the authorization request are identified and linked together by a request ID. The request ID can be:

    • generated outside Authorino and passed in the authorization request \u2013 this is essentially the case of requests via GRPC authorization interface initiated by the Envoy;
    • generated by Authorino \u2013 requests via Raw HTTP Authorization interface.
    "},{"location":"authorino/docs/user-guides/observability/#propagation","title":"Propagation","text":"

    Authorino propagates trace identifiers compatible with the W3C Trace Context format https://www.w3.org/TR/trace-context/ and user-defined baggage data in the W3C Baggage format https://www.w3.org/TR/baggage.

    "},{"location":"authorino/docs/user-guides/observability/#log-tracing","title":"Log tracing","text":"

    Most log messages associated with an authorization request include the request id value. This value can be used to match incoming request and corresponding outgoing response log messages, including at deep level when more fine-grained log details are enabled (debug log level).

    "},{"location":"authorino/docs/user-guides/observability/#opentelemetry-integration","title":"OpenTelemetry integration","text":"

    Integration with an OpenTelemetry collector can be enabled by supplying the --tracing-service-endpoint command-line flag (e.g. authorino server --tracing-service-endpoint=http://jaeger:14268/api/traces).

    The additional --tracing-service-tag command-line flag allows specifying fixed agent-level key-value tags for the trace signals emitted by Authorino (e.g. authorino server --tracing-service-endpoint=... --tracing-service-tag=key1=value1 --tracing-service-tag=key2=value2).

    Traces related to authorization requests are additionally tagged with the authorino.request_id attribute.

    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/","title":"User guide: OpenID Connect Discovery and authentication with JWTs","text":"

    Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 JWT verification

    Authorino validates JSON Web Tokens (JWT) issued by an OpenID Connect server that implements OpenID Connect Discovery. Authorino fetches the OpenID Connect configuration and JSON Web Key Set (JWKS) from the issuer endpoint, and verifies the JSON Web Signature (JWS) and time validity of the token.

    Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n
    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

    With a valid access token:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    With missing or invalid access token:

    curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-kuadrant-realm\"\n# x-ext-auth-reason: credential not found\n
    "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/oidc-rbac/","title":"User guide: OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak","text":"

    Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.

    In this user guide, you will learn via example how to implement a simple Role-Based Access Control (RBAC) system to protect endpoints of an API, with roles assigned to users of an Identity Provider (Keycloak) and carried within the access tokens as JSON Web Token (JWT) claims. Users authenticate with the IdP via OAuth2/OIDC flow and get their access tokens verified and validated by Authorino on every request. Moreover, Authorino reads the role bindings of the user and enforces the proper RBAC rules based upon the context.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 JWT verification
    • Authorization \u2192 Pattern-matching authorization

    Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/oidc-rbac/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/oidc-rbac/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    In this example, the Keycloak realm defines a few users and 2 realm roles: 'member' and 'admin'. When users authenticate to the Keycloak server by any of the supported OAuth2/OIDC flows, Keycloak adds to the access token JWT a claim \"realm_access\": { \"roles\": array } that holds the list of roles assigned to the user. Authorino will verify the JWT on requests to the API and read from that claim to enforce the following RBAC rules:

    Path Method Role /resources[/*] GET / POST / PUT member /resources/{id} DELETE admin /admin[/*] * admin Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.

    Apply the AuthConfig:

    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n\n  patterns:\n    \"member-role\":\n\n    - selector: auth.identity.realm_access.roles\n      operator: incl\n      value: member\n    \"admin-role\":\n    - selector: auth.identity.realm_access.roles\n      operator: incl\n      value: admin\n\n  authorization:\n    # RBAC rule: 'member' role required for requests to /resources[/*]\n    \"rbac-resources-api\":\n      when:\n\n      - selector: context.request.http.path\n        operator: matches\n        value: ^/resources(/.*)?$\n      patternMatching:\n        patterns:\n        - patternRef: member-role\n\n    # RBAC rule: 'admin' role required for DELETE requests to /resources/{id}\n    \"rbac-delete-resource\":\n      when:\n\n      - selector: context.request.http.path\n        operator: matches\n        value: ^/resources/\\d+$\n      - selector: context.request.http.method\n        operator: eq\n        value: DELETE\n      patternMatching:\n        patterns:\n        - patternRef: admin-role\n\n    # RBAC rule: 'admin' role required for requests to /admin[/*]\n    \"rbac-admin-api\":\n      when:\n\n      - selector: context.request.http.path\n        operator: matches\n        value: ^/admin(/.*)?$\n      patternMatching:\n        patterns:\n        - patternRef: admin-role\nEOF\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"

    Obtain an access token with the Keycloak server for John:

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for the user John, who is assigned to the 'member' role:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    As John, send a GET request to /resources:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n

    As John, send a DELETE request to /resources/123:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 403 Forbidden\n

    As John, send a GET request to /admin/settings:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-jane-memberadmin","title":"Obtain an access token and consume the API as Jane (member/admin)","text":"

    Obtain an access token from within the cluster for the user Jane, who is assigned to the 'member' and 'admin' roles:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    As Jane, send a GET request to /resources:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n

    As Jane, send a DELETE request to /resources/123:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 200 OK\n

    As Jane, send a GET request to /admin/settings:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/oidc-rbac/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/oidc-user-info/","title":"User guide: OpenID Connect UserInfo","text":"

    Fetch user info for OpenID Connect ID tokens in request-time for extra metadata for your policies and online verification of token validity.

    Authorino capabilities featured in this guide:
    • External auth metadata \u2192 OIDC UserInfo
    • Identity verification & authentication \u2192 JWT verification
    • Authorization \u2192 Pattern-matching authorization

    Apart from possibly complementing information of the JWT, fetching OpenID Connect UserInfo in request-time can be particularly useful for remote checking the state of the session, as opposed to only verifying the JWT/JWS offline. Implementation requires a JWT-based OpenID Connect issuer (jwt.issuerUrl) configured in the same AuthConfig.

    Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/oidc-user-info/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/oidc-user-info/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/oidc-user-info/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/oidc-user-info/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"userinfo\":\n      userInfo:\n        identitySource: keycloak-kuadrant-realm\n  authorization:\n    \"active-tokens-only\":\n      patternMatching:\n        patterns:\n        - selector: \"auth.metadata.userinfo.email\" # user email expected from the userinfo instead of the jwt\n          operator: neq\n          value: \"\"\nEOF\n
    "},{"location":"authorino/docs/user-guides/oidc-user-info/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster:

    export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    "},{"location":"authorino/docs/user-guides/oidc-user-info/#consume-the-api","title":"\u277c Consume the API","text":"

    With a valid access token:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    Revoke the access token and try to consume the API again:

    kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"authorino/docs/user-guides/oidc-user-info/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/opa-authorization/","title":"User guide: Open Policy Agent (OPA) Rego policies","text":"

    Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.

    Authorino capabilities featured in this guide:
    • Authorization \u2192 Open Policy Agent (OPA) Rego policies
    • Identity verification & authentication \u2192 API key

    Authorino supports Open Policy Agent policies, either inline defined in Rego language as part of the AuthConfig or fetched from an external endpoint, such as an OPA Policy Registry.

    Authorino's built-in OPA module precompiles the policies at reconciliation-time and caches them for fast evaluation at request-time, where they receive the Authorization JSON as input.

    Check out as well the user guide about Authentication with API keys.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/opa-authorization/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/opa-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    In this example, we will use OPA to implement a read-only policy for requests coming from outside a trusted network (IP range 192.168.1.0/24).

    The implementation relies on the X-Forwarded-For HTTP header to read the client's IP address.5

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  authorization:\n    \"read-only-outside\":\n      opa:\n        rego: |\n          ips := split(input.context.request.http.headers[\"x-forwarded-for\"], \",\")\n          trusted_network { net.cidr_contains(\"192.168.1.1/24\", ips[0]) }\n\n          allow { trusted_network }\n          allow { not trusted_network; input.context.request.http.method == \"GET\" }\nEOF\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#create-the-api-key","title":"\u277b Create the API key","text":"
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#consume-the-api","title":"\u277c Consume the API","text":"

    Inside the trusted network:

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    Outside the trusted network:

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
    "},{"location":"authorino/docs/user-guides/opa-authorization/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    5. You can also set use_remote_address: true in the Envoy route configuration, so the proxy will append its own IP address instead of running in transparent mode. This setting also ensures that the real remote address of the client connection is passed in the x-envoy-external-address HTTP header, which can be used to simplify the read-only policy in a remote environment.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/passing-credentials/","title":"User guide: Passing credentials (Authorization header, cookie headers and others)","text":"

    Customize where credentials are supplied in the request by each trusted source of identity.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 Auth credentials
    • Identity verification & authentication \u2192 API key

    Authentication tokens can be supplied in the Authorization header, in a custom header, cookie or query string parameter.

    Check out as well the user guide about Authentication with API keys.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/passing-credentials/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/passing-credentials/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    In this example, member users can authenticate supplying the API key in any of 4 different ways:

    • HTTP header Authorization: APIKEY <api-key>
    • HTTP header X-API-Key: <api-key>
    • Query string parameter api_key=<api-key>
    • Cookie Cookie: APIKEY=<api-key>;

    admin API keys are only accepted in the (default) HTTP header Authorization: Bearer <api-key>.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"members-authorization-header\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY # instead of the default prefix 'Bearer'\n    \"members-custom-header\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        customHeader:\n          name: X-API-Key\n    \"members-query-string-param\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        queryString:\n          name: api_key\n    \"members-cookie\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        cookie:\n          name: APIKEY\n    \"admins\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: admins\nEOF\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#create-the-api-keys","title":"\u277b Create the API keys","text":"

    For a member user:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: members\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

    For an admin user:

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-2\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: admins\nstringData:\n  api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#consume-the-api","title":"\u277c Consume the API","text":"

    As member user, passing the API key in the Authorization header:

    curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    As member user, passing the API key in the custom X-API-Key header:

    curl -H 'X-API-Key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    As member user, passing the API key in the query string parameter api_key:

    curl \"http://talker-api.127.0.0.1.nip.io:8000/hello?api_key=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\"\n# HTTP/1.1 200 OK\n

    As member user, passing the API key in the APIKEY cookie header:

    curl -H 'Cookie: APIKEY=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx;foo=bar' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    As admin user:

    curl -H 'Authorization: Bearer 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

    Missing the API key:

    curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"members-authorization-header\"\n# www-authenticate: X-API-Key realm=\"members-custom-header\"\n# www-authenticate: api_key realm=\"members-query-string-param\"\n# www-authenticate: APIKEY realm=\"members-cookie\"\n# www-authenticate: Bearer realm=\"admins\"\n# x-ext-auth-reason: {\"admins\":\"credential not found\",\"members-authorization-header\":\"credential not found\",\"members-cookie\":\"credential not found\",\"members-custom-header\":\"credential not found\",\"members-query-string-param\":\"credential not found\"}\n
    "},{"location":"authorino/docs/user-guides/passing-credentials/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/","title":"User guide: Resource-level authorization with User-Managed Access (UMA) resource registry","text":"

    Fetch resource metadata relevant for your authorization policies from Keycloak authorization clients, using User-Managed Access (UMA) protocol.

    Authorino capabilities featured in this guide:
    • External auth metadata \u2192 User-Managed Access (UMA) resource registry
    • Identity verification & authentication \u2192 JWT verification
    • Authorization \u2192 Open Policy Agent (OPA) Rego policies

    Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Open Policy Agent (OPA) Rego policies.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

    This example of resource-level authorization leverages part of Keycloak's User-Managed Access (UMA) support. Authorino will fetch resource attributes stored in a Keycloak resource server client.

    The Keycloak server also provides the identities. The sub claim of the Keycloak-issued ID tokens must match the owner of the requested resource, identified by the URI of the request.

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.

    Create a required secret that will be used by Authorino to initiate the authentication with the UMA registry.

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: talker-api-uma-credentials\nstringData:\n  clientID: talker-api\n  clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\nEOF\n

    Create the config:

    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"resource-data\":\n      uma:\n        endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n        credentialsRef:\n          name: talker-api-uma-credentials\n  authorization:\n    \"owned-resources\":\n      opa:\n        rego: |\n          COLLECTIONS = [\"greetings\"]\n\n          http_request = input.context.request.http\n          http_method = http_request.method\n          requested_path_sections = split(trim_left(trim_right(http_request.path, \"/\"), \"/\"), \"/\")\n\n          get { http_method == \"GET\" }\n          post { http_method == \"POST\" }\n          put { http_method == \"PUT\" }\n          delete { http_method == \"DELETE\" }\n\n          valid_collection { COLLECTIONS[_] == requested_path_sections[0] }\n\n          collection_endpoint {\n            valid_collection\n            count(requested_path_sections) == 1\n          }\n\n          resource_endpoint {\n            valid_collection\n            some resource_id\n            requested_path_sections[1] = resource_id\n          }\n\n          identity_owns_the_resource {\n            identity := input.auth.identity\n            resource_attrs := object.get(input.auth.metadata, \"resource-data\", [])[0]\n            resource_owner := object.get(object.get(resource_attrs, \"owner\", {}), \"id\", \"\")\n            resource_owner == identity.sub\n          }\n\n          allow { get;    collection_endpoint }\n          allow { post;   collection_endpoint }\n          allow { get;    resource_endpoint; identity_owns_the_resource }\n          allow { put;    resource_endpoint; identity_owns_the_resource }\n          allow { 
delete; resource_endpoint; identity_owns_the_resource }\nEOF\n

    The OPA policy owned-resources above enforces that all users can send GET and POST requests to /greetings, while only resource owners can send GET, PUT and DELETE requests to /greetings/{resource-id}.

    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-access-tokens-with-the-keycloak-server-and-consume-the-api","title":"\u277b Obtain access tokens with the Keycloak server and consume the API","text":""},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-john-and-consume-the-api","title":"Obtain an access token as John and consume the API","text":"

    Obtain an access token for user John (owner of the resource /greetings/1 in the UMA registry):

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    As John, send requests to the API:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-jane-and-consume-the-api","title":"Obtain an access token as Jane and consume the API","text":"

    Obtain an access token for user Jane (owner of the resource /greetings/2 in the UMA registry):

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    As Jane, send requests to the API:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-peter-and-consume-the-api","title":"Obtain an access token as Peter and consume the API","text":"

    Obtain an access token for user Peter (does not own any resource in the UMA registry):

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    As Peter, send requests to the API:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authconfig/talker-api-protection\nkubectl delete secret/talker-api-uma-credentials\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/sharding/","title":"User guide: Reducing the operational space","text":"

    By default, Authorino will watch events related to all AuthConfig custom resources in the reconciliation space (namespace or entire cluster). Instances can, however, be configured to watch only a subset of the resources, thus enabling use cases such as:

    • to reduce noise and lower memory usage inside instances meant for restricted scope (e.g. Authorino deployed as a dedicated sidecar to protect only one host);
    • sharding auth config data across multiple instances;
    • multiple environments (e.g. staging, production) inside of a same cluster/namespace;
    • providing managed instances of Authorino that all watch CRs cluster-wide, yet dedicated to organizations allowed to create and operate their own AuthConfigs across multiple namespaces.
    \u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant. Authorino capabilities featured in this guide:
    • Sharding
    • Identity verification & authentication \u2192 API key

    Check out as well the user guide about Authentication with API keys.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/sharding/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    "},{"location":"authorino/docs/user-guides/sharding/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/sharding/#deploy-instances-of-authorino","title":"\u2777 Deploy instances of Authorino","text":"

    Deploy an instance of Authorino dedicated to AuthConfigs and API key Secrets labeled with authorino/environment=staging:

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino-staging\nspec:\n  clusterWide: true\n  authConfigLabelSelectors: authorino/environment=staging\n  secretLabelSelectors: authorino/environment=staging\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n

    Deploy an instance of Authorino dedicated to AuthConfigs and API key Secrets labeled with authorino/environment=production, and NOT labeled disabled:

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino-production\nspec:\n  clusterWide: true\n  authConfigLabelSelectors: authorino/environment=production,!disabled\n  secretLabelSelectors: authorino/environment=production,!disabled\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n

    The commands above will both request instances of Authorino that watch for AuthConfig resources cluster-wide1, with TLS disabled2.

    "},{"location":"authorino/docs/user-guides/sharding/#create-a-namespace-for-user-resources","title":"\u2778 Create a namespace for user resources","text":"
    kubectl create namespace myapp\n
    "},{"location":"authorino/docs/user-guides/sharding/#create-authconfigs-and-api-key-secrets-for-both-instances","title":"\u2779 Create AuthConfigs and API key Secrets for both instances","text":""},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-staging","title":"Create resources for authorino-staging","text":"

    Create an AuthConfig:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: auth-config-1\n  labels:\n    authorino/environment: staging\nspec:\n  hosts:\n\n  - my-host.staging.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino/api-key: \"true\"\n            authorino/environment: staging\nEOF\n

    Create an API key Secret:

    kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino/api-key: \"true\"\n    authorino/environment: staging\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

    Verify in the logs that only the authorino-staging instance adds the resources to the index:

    kubectl logs $(kubectl get pods -l authorino-resource=authorino-staging -o name)\n# {\"level\":\"info\",\"ts\":1638382989.8327162,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638382989.837424,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638383144.9486837,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-1\"}\n
    "},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-production","title":"Create resources for authorino-production","text":"

    Create an AuthConfig:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: auth-config-2\n  labels:\n    authorino/environment: production\nspec:\n  hosts:\n\n  - my-host.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino/api-key: \"true\"\n            authorino/environment: production\nEOF\n

    Create an API key Secret:

    kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-2\n  labels:\n    authorino/api-key: \"true\"\n    authorino/environment: production\nstringData:\n  api_key: MUWdeBte7AbSWxl6CcvYNJ+3yEIm5CaL\ntype: Opaque\nEOF\n

    Verify in the logs that only the authorino-production instance adds the resources to the index:

    kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383423.86086,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383423.8608105,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383460.3515081,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-2\"}\n
    "},{"location":"authorino/docs/user-guides/sharding/#remove-a-resource-from-scope","title":"\u277a Remove a resource from scope","text":"
    kubectl -n myapp label authconfig/auth-config-2 disabled=true\n# authconfig.authorino.kuadrant.io/auth-config-2 labeled\n

    Verify in the logs that the authorino-production instance removes the authconfig from the index:

    kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383515.6428752,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource de-indexed\",\"authconfig\":\"myapp/auth-config-2\"}\n
    "},{"location":"authorino/docs/user-guides/sharding/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete authorino/authorino-staging\nkubectl delete authorino/authorino-production\nkubectl delete namespace myapp\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    2. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/token-normalization/","title":"User guide: Token normalization","text":"

    Broadly, the term token normalization in authentication systems usually implies the exchange of an authentication token, as provided by the user in a given format, and/or its associated identity claims, for another freshly issued token/set of claims, of a given (normalized) structure or format.

    The most typical use-case for token normalization involves accepting tokens issued by multiple trusted sources and of often varied authentication protocols, while ensuring that the eventual different data structures adopted by each of those sources are normalized, thus allowing to simplify policies and authorization checks that depend on those values. In general, however, any modification to the identity claims can be for the purpose of normalization.

    This user guide focuses on the aspect of mutation of the identity claims resolved from an authentication token, to a certain data format and/or by extending them, so that required attributes can thereafter be trusted to be present among the claims, in a desired form. To that end, Authorino allows extending resolved identity objects with custom attributes (custom claims) of either static values or values fetched from the Authorization JSON.

    For not only normalizing the identity claims for purpose of writing simpler authorization checks and policies, but also getting Authorino to issue a new token in a normalized format, check the Festival Wristband tokens feature.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 Identity extension
    • Identity verification & authentication \u2192 API key
    • Identity verification & authentication \u2192 JWT verification
    • Authorization \u2192 Pattern-matching authorization

    Check out as well the user guides about Authentication with API keys, OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/token-normalization/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
    • jq, to extract parts of JSON responses

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

    At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/token-normalization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

    kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

    The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#setup-envoy","title":"\u2779 Setup Envoy","text":"

    The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

    The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

    kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

    Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

    This example implements a policy that only users bound to the admin role can send DELETE requests.

    The config trusts access tokens issued by a Keycloak realm as well as API keys labeled specifically to a selected group (friends). The roles of the identities handled by Keycloak are managed in Keycloak, as realm roles. Particularly, users john and peter are bound to the member role, while user jane is bound to roles member and admin. As for the users authenticating with API key, they are all bound to the admin role.

    Without normalizing identity claims from these two different sources, the policy would have to handle the differences of data formats with additional ifs-and-elses. Instead, the config here uses the identity.extendedProperties option to ensure a custom roles (Array) claim is always present in the identity object. In the case of Keycloak ID tokens, the value is extracted from the realm_access.roles claim; for API key-resolved objects, the custom claim is set to the static value [\"admin\"].

    Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
    kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n      overrides:\n        \"roles\":\n          selector: auth.identity.realm_access.roles\n    \"api-key-friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n      defaults:\n        \"roles\":\n          value: [\"admin\"]\n  authorization:\n    \"only-admins-can-delete\":\n      when:\n      - selector: context.request.http.method\n        operator: eq\n        value: DELETE\n      patternMatching:\n        patterns:\n        - selector: auth.identity.roles\n          operator: incl\n          value: admin\nEOF\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#create-an-api-key","title":"\u277b Create an API key","text":"
    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"

    Obtain an access token with the Keycloak server for Jane:

    The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

    Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

    Consume the API as Jane:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"

    Obtain an access token with the Keycloak server for John:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    Consume the API as John:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api-using-the-api-key-to-authenticate-admin","title":"Consume the API using the API key to authenticate (admin)","text":"
    curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
    "},{"location":"authorino/docs/user-guides/token-normalization/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

    "},{"location":"authorino/docs/user-guides/validating-webhook/","title":"User guide: Using Authorino as ValidatingWebhook service","text":"

    Authorino provides an interface for raw HTTP external authorization requests. This interface can be used for integrations other than the typical Envoy gRPC protocol, such as (though not limited to) using Authorino as a generic Kubernetes ValidatingWebhook service.

    The rules to validate a request to the Kubernetes API \u2013 typically a POST, PUT or DELETE request targeting a particular Kubernetes resource or collection \u2013, according to which either the change will be deemed accepted or not, are written in an Authorino AuthConfig custom resource. Authentication and authorization are performed by the Kubernetes API server as usual, with auth features of Authorino implementing the additional validation within the scope of an AdmissionReview request.

    This user guide provides an example of using Authorino as a Kubernetes ValidatingWebhook service that validates requests to CREATE and UPDATE Authorino AuthConfig resources. In other words, we will use Authorino as a validator inside the cluster that decides what is a valid AuthConfig for any application which wants to rely on Authorino to protect itself.

    Authorino capabilities featured in this guide:
    • Identity verification & authentication \u2192 Plain
    • Identity verification & authentication \u2192 Kubernetes TokenReview
    • Identity verification & authentication \u2192 API key
    • External auth metadata \u2192 HTTP GET/GET-by-POST
    • Authorization \u2192 Kubernetes SubjectAccessReview
    • Authorization \u2192 Open Policy Agent (OPA) Rego policies
    • Dynamic response \u2192 Festival Wristband tokens
    • Common feature \u2192 Conditions
    • Common feature \u2192 Priorities

    For further details about Authorino features in general, check the docs.

    "},{"location":"authorino/docs/user-guides/validating-webhook/#requirements","title":"Requirements","text":"
    • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
    • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)

    If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

    kind create cluster --name authorino-tutorial\n

    Deploy the identity provider and authentication server. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

    The Keycloak server is only needed for trying out validating AuthConfig resources that use the authentication server.

    kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

    Using Kuadrant

    If you are a user of Kuadrant you may already have Authorino installed and running. In this case, skip straight to step \u2778.

    At step \u277a, alternatively to creating an AuthConfig custom resource, you may create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

    For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

    "},{"location":"authorino/docs/user-guides/validating-webhook/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

    The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

    curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

    Create the namespace:

    kubectl create namespace authorino\n

    Create the TLS certificates:

    curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n

    Create the Authorino instance:

    The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources cluster-wide2, with TLS enabled3.

    kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  clusterWide: true\n  listener:\n    ports:\n      grpc: 50051\n      http: 5001 # for admissionreview requests sent by the kubernetes api server\n    tls:\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n

    For convenience, the same instance of Authorino pointed as the validating webhook will also be targeted for the sample AuthConfigs created to test the validation. For using different instances of Authorino for the validating webhook and for protecting applications behind a proxy, check out the section about sharding in the docs. There is also a user guide on the topic, with concrete examples.

    "},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-authconfig-and-related-clusterrole","title":"\u2778 Create the AuthConfig and related ClusterRole","text":"

    Create the AuthConfig with the auth rules to validate other AuthConfig resources applied to the cluster.

    The AuthConfig to validate other AuthConfigs will enforce the following rules:

    • Authorino features that cannot be used by any application in their security schemes:
    • Anonymous Access
    • Plain identity object extracted from context
    • Kubernetes authentication (TokenReview)
    • Kubernetes authorization (SubjectAccessReview)
    • Festival Wristband tokens
    • Authorino features that require a RoleBinding to a specific ClusterRole in the 'authorino' namespace, to be used in a AuthConfig:
    • Authorino API key authentication
    • All metadata pulled from external sources must be cached for precisely 5 minutes (300 seconds)
    kubectl -n authorino apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: authconfig-validator\nspec:\n  # admissionreview requests will be sent to this host name\n  hosts:\n\n  - authorino-authorino-authorization.authorino.svc\n\n  # because we're using a single authorino instance for the validating webhook and to protect the user applications,\n  # skip operations related to this one authconfig in the 'authorino' namespace\n  when:\n\n  - selector: context.request.http.body.@fromstr|request.object.metadata.namespace\n    operator: neq\n    value: authorino\n\n  # kubernetes admissionreviews carry info about the authenticated user\n  authentication:\n    \"k8s-userinfo\":\n      plain:\n        selector: context.request.http.body.@fromstr|request.userInfo\n\n  authorization:\n    \"features\":\n      opa:\n        rego: |\n          authconfig = json.unmarshal(input.context.request.http.body).request.object\n\n          forbidden { count(object.get(authconfig.spec, \"authentication\", [])) == 0 }\n          forbidden { authconfig.spec.authentication[_].anonymous }\n          forbidden { authconfig.spec.authentication[_].kubernetesTokenReview }\n          forbidden { authconfig.spec.authentication[_].plain }\n          forbidden { authconfig.spec.authorization[_].kubernetesSubjectAccessReview }\n          forbidden { authconfig.spec.response.success.headers[_].wristband }\n\n          apiKey { authconfig.spec.authentication[_].apiKey }\n\n          allow { count(authconfig.spec.authentication) > 0; not forbidden }\n        allValues: true\n\n    \"apikey-authn-requires-k8s-role-binding\":\n      priority: 1\n      when:\n\n      - selector: auth.authorization.features.apiKey\n        operator: eq\n        value: \"true\"\n      kubernetesSubjectAccessReview:\n        user:\n          selector: auth.identity.username\n        resourceAttributes:\n          namespace: { value: authorino }\n          group: { value: 
authorino.kuadrant.io }\n          resource: { value: authconfigs-with-apikeys }\n          verb: { value: create }\n\n    \"metadata-cache-ttl\":\n      priority: 1\n      opa:\n        rego: |\n          invalid_ttl = input.auth.authorization.features.authconfig.spec.metadata[_].cache.ttl != 300\n          allow { not invalid_ttl }\nEOF\n

    Define a ClusterRole to control the usage of protected features of Authorino:

    kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: authorino-apikey\nrules:\n\n- apiGroups: [\"authorino.kuadrant.io\"]\n  resources: [\"authconfigs-with-apikeys\"] # not a real k8s resource\n  verbs: [\"create\"]\nEOF\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-validatingwebhookconfiguration","title":"\u2779 Create the ValidatingWebhookConfiguration","text":"
    kubectl -n authorino apply -f -<<EOF\napiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n  name: authconfig-authz\n  annotations:\n    cert-manager.io/inject-ca-from: authorino/authorino-ca-cert\nwebhooks:\n\n- name: check-authconfig.authorino.kuadrant.io\n  clientConfig:\n    service:\n      namespace: authorino\n      name: authorino-authorino-authorization\n      port: 5001\n      path: /check\n  rules:\n  - apiGroups: [\"authorino.kuadrant.io\"]\n    apiVersions: [\"v1beta2\"]\n    resources: [\"authconfigs\"]\n    operations: [\"CREATE\", \"UPDATE\"]\n    scope: Namespaced\n  sideEffects: None\n  admissionReviewVersions: [\"v1\"]\nEOF\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#try-it-out","title":"\u277a Try it out","text":"

    Create a namespace:

    kubectl create namespace myapp\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#with-a-valid-authconfig","title":"With a valid AuthConfig","text":"Kuadrant users \u2013 For this and other example AuthConfigs below, if you create a Kuadrant AuthPolicy instead, the output of the commands shall differ. The requested AuthPolicy may be initially accepted, but its state will turn ready or not ready depending on whether the corresponding AuthConfig requested by Kuadrant is accepted or rejected, according to the validating webhook rules. Check the state of the resources to confirm. For more, see Kuadrant auth.
    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection created\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#with-forbidden-features","title":"With forbidden features","text":"

    Anonymous access:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":null}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"anonymous-access\":\n      anonymous: {}\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"anonymous-access\\\":{\\\"anonymous\\\":{}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"anonymous-access\":{\"anonymous\":{}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

    Kubernetes TokenReview:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"k8s-tokenreview\":\n      kubernetesTokenReview:\n        audiences: [\"myapp\"]\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"k8s-tokenreview\\\":{\\\"kubernetesTokenReview\\\":{\\\"audiences\\\":[\\\"myapp\\\"]}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"k8s-tokenreview\":{\"kubernetesTokenReview\":{\"audiences\":[\"myapp\"]}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

    Plain identity extracted from context:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"envoy-jwt-authn\":\n      plain:\n        selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"envoy-jwt-authn\\\":{\\\"plain\\\":{\\\"selector\\\":\\\"context.metadata_context.filter_metadata.envoy\\\\\\\\.filters\\\\\\\\.http\\\\\\\\.jwt_authn|verified_jwt\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"envoy-jwt-authn\":{\"plain\":{\"selector\":\"context.metadata_context.filter_metadata.envoy\\\\.filters\\\\.http\\\\.jwt_authn|verified_jwt\"}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

    Kubernetes SubjectAccessReview:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  authorization:\n    \"k8s-subjectaccessreview\":\n      kubernetesSubjectAccessReview:\n        user:\n          selector: auth.identity.sub\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"authorization\\\":{\\\"k8s-subjectaccessreview\\\":{\\\"kubernetesSubjectAccessReview\\\":{\\\"user\\\":{\\\"selector\\\":\\\"auth.identity.sub\\\"}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authorization\":{\"k8s-subjectaccessreview\":{\"kubernetesSubjectAccessReview\":{\"user\":{\"selector\":\"auth.identity.sub\"}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

    Festival Wristband tokens:

    kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: wristband-signing-key\nstringData:\n  key.pem: |\n    -----BEGIN EC PRIVATE KEY-----\n    MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n    AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n    cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n    -----END EC PRIVATE KEY-----\ntype: Opaque\n---\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  response:\n    success:\n      headers:\n        \"wristband\":\n          wristband:\n            issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\n            signingKeyRefs:\n            - algorithm: ES256\n              name: wristband-signing-key\nEOF\n# secret/wristband-signing-key created\n# Error from server: error when applying patch:\n# 
{\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"response\\\":{\\\"success\\\":{\\\"headers\\\":{\\\"wristband\\\":{\\\"wristband\\\":{\\\"issuer\\\":\\\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\\\",\\\"signingKeyRefs\\\":[{\\\"algorithm\\\":\\\"ES256\\\",\\\"name\\\":\\\"wristband-signing-key\\\"}]}}}}}}}\\n\"}},\"spec\":{\"response\":{\"success\":{\"headers\":{\"wristband\":{\"wristband\":{\"issuer\":\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\",\"signingKeyRefs\":[{\"algorithm\":\"ES256\",\"name\":\"wristband-signing-key\"}]}}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-additional-permissions","title":"With features that require additional permissions","text":"

    Before adding the required permissions:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels: { app: myapp }\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"api-key\\\":{\\\"apiKey\\\":{\\\"selector\\\":{\\\"matchLabels\\\":{\\\"app\\\":\\\"myapp\\\"}}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":{\"apiKey\":{\"selector\":{\"matchLabels\":{\"app\":\"myapp\"}}}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Not authorized: unknown reason\n

    Add the required permissions:

    kubectl -n authorino apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: authorino-apikey\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: authorino-apikey\nsubjects:\n\n- kind: User\n  name: kubernetes-admin\nEOF\n# rolebinding.rbac.authorization.k8s.io/authorino-apikey created\n

    After adding the required permissions:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels: { app: myapp }\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-specific-property-validation","title":"With features that require specific property validation","text":"

    Invalid:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"external-source\":\n      http:\n        url: http://metadata.io\n      cache:\n        key: { value: global }\n        ttl: 60\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"metadata\\\":{\\\"external-source\\\":{\\\"cache\\\":{\\\"key\\\":{\\\"value\\\":\\\"global\\\"},\\\"ttl\\\":60},\\\"http\\\":{\\\"url\\\":\\\"http://metadata.io\\\"}}}}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":null,\"keycloak\":{\"jwt\":{\"issuerUrl\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\"}}},\"metadata\":{\"external-source\":{\"cache\":{\"key\":{\"value\":\"global\"},\"ttl\":60},\"http\":{\"url\":\"http://metadata.io\"}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

    Valid:

    kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"external-source\":\n      http:\n        url: http://metadata.io\n      cache:\n        key: { value: global }\n        ttl: 300\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
    "},{"location":"authorino/docs/user-guides/validating-webhook/#cleanup","title":"Cleanup","text":"

    If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

    kind delete cluster --name authorino-tutorial\n

    Otherwise, delete the resources created in each step:

    kubectl delete namespace myapp\nkubectl delete namespace authorino\nkubectl delete clusterrole authorino-apikey\nkubectl delete namespace keycloak\n

    To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

    kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
    1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

    2. cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

    3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

    "},{"location":"authorino-operator/","title":"Authorino Operator","text":"

    A Kubernetes Operator to manage Authorino instances.

    "},{"location":"authorino-operator/#installation","title":"Installation","text":"

    The Operator can be installed by applying the manifests to the Kubernetes cluster or using Operator Lifecycle Manager (OLM)

    "},{"location":"authorino-operator/#applying-the-manifests-to-the-cluster","title":"Applying the manifests to the cluster","text":"
    1. Install the Operator manifests
    make install\n
    1. Deploy the Operator
    make deploy\n
    Tip: Deploy a custom image of the Operator To deploy an image of the Operator other than the default quay.io/kuadrant/authorino-operator:latest, specify by setting the OPERATOR_IMAGE parameter. E.g.:
    make deploy OPERATOR_IMAGE=authorino-operator:local\n
    "},{"location":"authorino-operator/#installing-via-olm","title":"Installing via OLM","text":"

    To install the Operator using the Operator Lifecycle Manager, you need to make the Operator CSVs available in the cluster by creating a CatalogSource resource.

    The bundle and catalog images of the Operator are available in Quay.io:

    Bundle quay.io/kuadrant/authorino-operator-bundle Catalog quay.io/kuadrant/authorino-operator-catalog
    1. Create the namespace for the Operator
    kubectl create namespace authorino-operator\n
    1. Create the CatalogSource resource pointing to one of the images in the Operator's catalog repo:
    kubectl -n authorino-operator apply -f -<<EOF\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n  name: operatorhubio-catalog\n  namespace: authorino-operator\nspec:\n  sourceType: grpc\n  image: quay.io/kuadrant/authorino-operator-catalog:latest\n  displayName: Authorino Operator\nEOF\n
    "},{"location":"authorino-operator/#deploy-authorino-operator-using-operator-sdk","title":"Deploy authorino operator using operator-sdk","text":"
    1. Install operator-sdk bin
      make operator-sdk\n
    2. Run operator-sdk bundle command
      ./bin/operator-sdk run bundle quay.io/kuadrant/authorino-operator-bundle:latest\n
      Note: For s390x & ppc64le , use operator-sdk to install authorino-operator
    "},{"location":"authorino-operator/#requesting-an-authorino-instance","title":"Requesting an Authorino instance","text":"

    Once the Operator is up and running, you can request instances of Authorino by creating Authorino CRs. E.g.:

    kubectl -n default apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
    "},{"location":"authorino-operator/#the-authorino-custom-resource-definition-crd","title":"The Authorino Custom Resource Definition (CRD)","text":"

    API to install, manage and configure Authorino authorization services .

    Each Authorino Custom Resource (CR) represents an instance of Authorino deployed to the cluster. The Authorino Operator will reconcile the state of the Kubernetes Deployment and associated resources, based on the state of the CR.

    "},{"location":"authorino-operator/#api-specification","title":"API Specification","text":"Field Type Description Required/Default spec AuthorinoSpec Specification of the Authorino deployment. Required"},{"location":"authorino-operator/#authorinospec","title":"AuthorinoSpec","text":"Field Type Description Required/Default clusterWide Boolean Sets the Authorino instance's watching scope \u2013 cluster-wide or namespaced. Default: true (cluster-wide) authConfigLabelSelectors String Label selectors used by the Authorino instance to filter AuthConfig-related reconciliation events. Default: empty (all AuthConfigs are watched) secretLabelSelectors String Label selectors used by the Authorino instance to filter Secret-related reconciliation events (API key and mTLS authentication methods). Default: authorino.kuadrant.io/managed-by=authorino supersedingHostSubsets Boolean Enable/disable allowing AuthConfigs to supersede strict subsets of hosts already taken. Default: false replicas Integer Number of replicas desired for the Authorino instance. Values greater than 1 enable leader election in the Authorino service, where the leader updates the statuses of the AuthConfig CRs). Default: 1 evaluatorCacheSize Integer Cache size (in megabytes) of each Authorino evaluator (when enabled in an AuthConfig). Default: 1 image String Authorino image to be deployed (for dev/testing purpose only). Default: quay.io/kuadrant/authorino:latest imagePullPolicy String Sets the imagePullPolicy of the Authorino Deployment (for dev/testing purpose only). Default: k8s default logLevel String Defines the level of log you want to enable in Authorino (debug, info and error). Default: info logMode String Defines the log mode in Authorino (development or production). Default: production listener Listener Specification of the authorization service (gRPC interface). Required oidcServer OIDCServer Specification of the OIDC service. 
Required tracing Tracing Configuration of the OpenTelemetry tracing exporter. Optional metrics Metrics Configuration of the metrics server (port, level). Optional healthz Healthz Configuration of the health/readiness probe (port). Optional volumes VolumesSpec Additional volumes to be mounted in the Authorino pods. Optional"},{"location":"authorino-operator/#listener","title":"Listener","text":"

    Configuration of the authorization server \u2013 gRPC and raw HTTP interfaces

    Field Type Description Required/Default port Integer Port number of authorization server (gRPC interface). DEPRECATED. Use ports instead ports Ports Port numbers of the authorization server (gRPC and raw HTTP interfaces). Optional tls TLS TLS configuration of the authorization server (gRPC and HTTP interfaces). Required timeout Integer Timeout of external authorization request (in milliseconds), controlled internally by the authorization server. Default: 0 (disabled)"},{"location":"authorino-operator/#oidcserver","title":"OIDCServer","text":"

    Configuration of the OIDC Discovery server for Festival Wristband tokens.

    Field Type Description Required/Default port Integer Port number of OIDC Discovery server for Festival Wristband tokens. Default: 8083 tls TLS TLS configuration of the OIDC Discovery server for Festival Wristband tokens Required"},{"location":"authorino-operator/#tls","title":"TLS","text":"

    TLS configuration of server. Appears in listener and oidcServer.

    Field Type Description Required/Default enabled Boolean Whether TLS is enabled or disabled for the server. Default: true certSecretRef LocalObjectReference The reference to the secret that contains the TLS certificates tls.crt and tls.key. Required when enabled: true"},{"location":"authorino-operator/#ports","title":"Ports","text":"

    Port numbers of the authorization server.

    Field Type Description Required/Default grpc Integer Port number of the gRPC interface of the authorization server. Set to 0 to disable this interface. Default: 50001 http Integer Port number of the raw HTTP interface of the authorization server. Set to 0 to disable this interface. Default: 5001"},{"location":"authorino-operator/#tracing","title":"Tracing","text":"

    Configuration of the OpenTelemetry tracing exporter.

    Field Type Description Required/Default endpoint String Full endpoint of the OpenTelemetry tracing collector service (e.g. http://jaeger:14268/api/traces). Required tags Map Key-value map of fixed tags to add to all OpenTelemetry traces emitted by Authorino. Optional insecure Boolean Enable/disable insecure connection to the tracing endpoint Default: false"},{"location":"authorino-operator/#metrics","title":"Metrics","text":"

    Configuration of the metrics server.

    Field Type Description Required/Default port Integer Port number of the metrics server. Default: 8080 deep Boolean Enable/disable metrics at the level of each evaluator config (if requested in the AuthConfig) exported by the metrics server. Default: false"},{"location":"authorino-operator/#healthz","title":"Healthz","text":"

    Configuration of the health/readiness probe (port).

    Field Type Description Required/Default port Integer Port number of the health/readiness probe. Default: 8081"},{"location":"authorino-operator/#volumesspec","title":"VolumesSpec","text":"

    Additional volumes to project in the Authorino pods. Useful for validating the self-signed TLS certificates of external services that Authorino is expected to contact at runtime.

    Field Type Description Required/Default items []VolumeSpec List of additional volume items to project. Optional defaultMode Integer Mode bits used to set permissions on the files. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. Optional"},{"location":"authorino-operator/#volumespec","title":"VolumeSpec","text":"Field Type Description Required/Default name String Name of the volume and volume mount within the Deployment. It must be unique in the CR. Optional mountPath String Absolute path where to mount all the items. Required configMaps []String List of Kubernetes ConfigMap names to mount. Required exactly one of: configMaps, secrets. secrets []String List of Kubernetes Secret names to mount. Required exactly one of: configMaps, secrets. items []KeyToPath Mount details for selecting specific ConfigMap or Secret entries. Optional"},{"location":"authorino-operator/#full-example","title":"Full example","text":"
    apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  clusterWide: true\n  authConfigLabelSelectors: environment=production\n  secretLabelSelectors: authorino.kuadrant.io/component=authorino,environment=production\n\n  replicas: 2\n\n  evaluatorCacheSize: 2 # mb\n\n  image: quay.io/kuadrant/authorino:latest\n  imagePullPolicy: Always\n\n  logLevel: debug\n  logMode: production\n\n  listener:\n    ports:\n      grpc: 50001\n      http: 5001\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-server-cert # secret must contain `tls.crt` and `tls.key` entries\n\n  oidcServer:\n    port: 8083\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-oidc-server-cert # secret must contain `tls.crt` and `tls.key` entries\n\n  metrics:\n    port: 8080\n    deep: true\n\n  volumes:\n    items:\n\n      - name: keycloak-tls-cert\n        mountPath: /etc/ssl/certs\n        configMaps:\n          - keycloak-tls-cert\n        items: # details to mount the k8s configmap in the authorino pods\n          - key: keycloak.crt\n            path: keycloak.crt\n    defaultMode: 420\n
    "},{"location":"authorino-operator/#removal","title":"Removal","text":""},{"location":"authorino-operator/#removing-the-operator-installed-via-manifests","title":"Removing the operator installed via manifests","text":"
    1. Undeploy the Operator
    make undeploy\n
    1. Remove the Operator manifests
    make uninstall\n
    "},{"location":"authorino-operator/#remove-dependencies-optional","title":"Remove dependencies (Optional)","text":"
    1. Remove operator namespace

      make delete-namespace\n

    2. Uninstall cert manager

      make uninstall-cert-manager\n

    "},{"location":"authorino-operator/#license","title":"License","text":""},{"location":"limitador/","title":"Limitador","text":"

    Limitador is a generic rate-limiter written in Rust. It can be used as a library, or as a service. The service exposes HTTP endpoints to apply and observe limits. Limitador can be used with Envoy because it also exposes a grpc service, on a different port, that implements the Envoy Rate Limit protocol (v3).

    • Getting started
    • How it works
    • Configuration
    • Development
    • Testing Environment
    • Kubernetes
    • Contributing
    • License

    Limitador is under active development, and its API has not been stabilized yet.

    "},{"location":"limitador/#getting-started","title":"Getting started","text":"
    • Rust library
    • Server
    "},{"location":"limitador/#rust-library","title":"Rust library","text":"

    Add this to your Cargo.toml:

    [dependencies]\nlimitador = { version = \"0.3.0\" }\n

    For more information, see the README of the crate

    "},{"location":"limitador/#server","title":"Server","text":"

    Run with Docker (replace latest with the version you want):

    docker run --rm --net=host -it quay.io/kuadrant/limitador:v1.0.0\n

    Run locally:

    cargo run --release --bin limitador-server -- --help\n

    Refer to the help message on how to start up the server. More information is available in the server's README.md

    "},{"location":"limitador/#development","title":"Development","text":""},{"location":"limitador/#build","title":"Build","text":"
    cargo build\n
    "},{"location":"limitador/#run-the-tests","title":"Run the tests","text":"

    Some tests need a redis deployed in localhost:6379. You can run it in Docker with:

    docker run --rm -p 6379:6379 -it redis\n

    Then, run the tests:

    cargo test --all-features\n

    or you can run tests disabling the \"redis storage\" feature:

    cd limitador; cargo test --no-default-features\n

    "},{"location":"limitador/#contributing","title":"Contributing","text":"

    Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.

    "},{"location":"limitador/#license","title":"License","text":"

    Apache 2.0 License

    "},{"location":"limitador/doc/how-it-works/","title":"How it works","text":""},{"location":"limitador/doc/how-it-works/#how-it-works","title":"How it works","text":"

    Limitador will increment counters for all Limits that apply; if any of these counters is above its Limit's max_value, the request will be considered to be rate limited. So think of it as if the most restrictive limit configuration will apply.

    Limitador will evaluate whether a Limit applies against its namespace, its conditions and whether all variables are resolvable. The namespace for the descriptors is defined by the domain field from the service.ratelimit.v3.RateLimitRequest. For each matching Limit, its counter is increased and checked against the Limit max_value.

    One example to illustrate:

    Let's say we have one rate limit:

    conditions: [ \"descriptors[0].KEY_A == 'VALUE_A'\" ]\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n

    Limitador Server receives a request with one descriptor with two entries:

    domain: example.org\ndescriptors:\n\n  - entries:\n    - KEY_A: VALUE_A\n    - OTHER_KEY: OTHER_VALUE\n

    The counter's condition all match. Then, the counter will be increased and the limit checked. If the limit is exceeded, the request will be rejected with 429 Too Many Requests, otherwise accepted.

    Note that the counter is being activated even though it does not match all the entries of the descriptor. The same rule applies for the variables field.

    Conditions are CEL expressions evaluating to a bool value.

    The variables field is a list of keys. The matching rule is defined just as the existence of the list of descriptor entries with the same key values. If variables is variables: [\"descriptors[0].A\", \"descriptors[0].B\", \"descriptors[0].C\"], the limit will match if the first descriptor has at least three entries with the same A, B, C keys.

    Few examples to illustrate.

    Having the following descriptors:

    domain: example.org\ndescriptors:\n\n  - entries:\n    - KEY_A: VALUE_A\n    - OTHER_KEY: OTHER_VALUE\n

    the following counters would not be activated.

    conditions: [ \"descriptors[0].KEY_B == 'VALUE_B'\" ]\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
    Reason: conditions key does not exist

    conditions:\n\n  - \"descriptors[0].KEY_A == 'VALUE_A'\"\n  - \"descriptors[0].OTHER_KEY == 'WRONG_VALUE'\"\nmax_value: 1\nseconds: 60\nvariables: []\nnamespace: example.org\n
    Reason: not all the conditions match

    conditions: []\nmax_value: 1\nseconds: 60\nvariables: [ \"descriptors[0].MY_VAR\" ]\nnamespace: example.org\n
    Reason: the variable name does not exist

    conditions: [ \"descriptors[0].KEY_B == 'VALUE_B'\" ]\nmax_value: 1\nseconds: 60\nvariables: [ \"descriptors[0].MY_VAR\" ]\nnamespace: example.org\n
    Reason: Both variables and conditions must match. In this particular case, only conditions match

    "},{"location":"limitador/doc/topologies/","title":"Deployment topologies","text":""},{"location":"limitador/doc/topologies/#in-memory","title":"In-memory","text":""},{"location":"limitador/doc/topologies/#redis","title":"Redis","text":""},{"location":"limitador/doc/topologies/#redis-active-active-storage","title":"Redis active-active storage","text":"

    The RedisLabs version of Redis supports active-active replication. Limitador is compatible with that deployment mode, but there are a few things to take into account regarding limit accuracy.

    "},{"location":"limitador/doc/topologies/#considerations","title":"Considerations","text":"

    With an active-active deployment, the data needs to be replicated between instances. An update in an instance takes a short time to be reflected in the other. That time lag depends mainly on the network speed between the Redis instances, and it affects the accuracy of the rate-limiting performed by Limitador because it can go over limits while the updates of the counters are being replicated.

    The impact of that greatly depends on the use case. With limits of a few seconds, and a low number of hits, we could easily go over limits. On the other hand, if we have defined limits with a high number of hits and a long period, the effect will be basically negligible. For example, if we define a limit of one hour, and we know that the data takes around one second to be replicated, the accuracy loss is going to be negligible.

    "},{"location":"limitador/doc/topologies/#set-up","title":"Set up","text":"

    In order to try active-active replication, you can follow this tutorial from RedisLabs.

    "},{"location":"limitador/doc/topologies/#disk","title":"Disk","text":"

    Disk storage using RocksDB. Counters are held on disk (persistent).

    "},{"location":"limitador/doc/migrations/conditions/","title":"New condition syntax","text":"

    With limitador-server version 1.0.0 (and the limitador crate version 0.3.0), the syntax for conditions within limit definitions has changed.

    "},{"location":"limitador/doc/migrations/conditions/#note-this-synthax-has-been-deprecated-as-of-version-200","title":"Note! This syntax has been deprecated as of version 2.0.0","text":""},{"location":"limitador/doc/migrations/conditions/#changes-when-working-with-limitador-server-versions-1x","title":"Changes when working with Limitador Server versions 1.x","text":""},{"location":"limitador/doc/migrations/conditions/#the-new-syntax","title":"The new syntax","text":"

    The new syntax formalizes what part of an expression is the identifier and which is the value to test against. Identifiers are simple string values, while string literals are to be demarcated by single quotes (') or double quotes (\") so that foo == \" bar\" now makes it explicit that the value is to be prefixed with a space character.

    A few remarks:

    • Only string values are supported, as that's what they really are
    • There is no escape character sequence supported in string literals
    • A new operator has been added, !=
    "},{"location":"limitador/doc/migrations/conditions/#the-issue-with-the-deprecated-syntax","title":"The issue with the deprecated syntax","text":"

    The previous syntax wouldn't differentiate between values and the identifier, so that foo == bar was valid. In this case foo was the identifier of the variable, while bar was the value to evaluate it against. Whitespace before and after the operator == would be equally important. So that foo == bar would test for a foo variable being equal to bar where the trailing whitespace after the identifier, and the one prefixing the value, would have been evaluated.

    "},{"location":"limitador/doc/server/configuration/","title":"Limitador configuration","text":""},{"location":"limitador/doc/server/configuration/#command-line-configuration","title":"Command line configuration","text":"

    The preferred way of starting and configuring the Limitador server is using the command line:

    Rate Limiting Server\n\nUsage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]\n\nSTORAGES:\n  memory        Counters are held in Limitador (ephemeral)\n  disk          Counters are held on disk (persistent)\n  redis         Uses Redis to store counters\n  redis_cached  Uses Redis to store counters, with an in-memory cache\n\nArguments:\n  <LIMITS_FILE>  The limit file to use\n\nOptions:\n  -b, --rls-ip <ip>\n          The IP to listen on for RLS [default: 0.0.0.0]\n  -p, --rls-port <port>\n          The port to listen on for RLS [default: 8081]\n  -B, --http-ip <http_ip>\n          The IP to listen on for HTTP [default: 0.0.0.0]\n  -P, --http-port <http_port>\n          The port to listen on for HTTP [default: 8080]\n  -l, --limit-name-in-labels\n          Include the Limit Name in prometheus label\n      --tracing-endpoint <tracing_endpoint>\n          The host for the tracing service [default: ]\n  -v...\n          Sets the level of verbosity\n      --validate\n          Validates the LIMITS_FILE and exits\n  -H, --rate-limit-headers <rate_limit_headers>\n          Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]\n      --grpc-reflection-service\n          Enables gRPC server reflection service\n  -h, --help\n          Print help\n  -V, --version\n          Print version\n

    The values used are authoritative over any environment variables independently set.

    "},{"location":"limitador/doc/server/configuration/#limit-definitions","title":"Limit definitions","text":"

    The LIMITS_FILE provided is the source of truth for all the limits that will be enforced. The file location will be monitored by the server for any changes and be hot reloaded. If the changes are invalid, they will be ignored on hot reload, or the server will fail to start.

    "},{"location":"limitador/doc/server/configuration/#the-limits_files-format","title":"The LIMITS_FILE's format","text":"

    When starting the server, you point it to a LIMITS_FILE, which is expected to be a yaml file with an array of limit definitions, with the following format:

    ---\n\"$schema\": http://json-schema.org/draft-04/schema#\ntype: object\nproperties:\n  name:\n    type: string\n  namespace:\n    type: string\n  seconds:\n    type: integer\n  max_value:\n    type: integer\n  conditions:\n    type: array\n    items:\n\n      - type: string\n  variables:\n    type: array\n    items:\n      - type: string\nrequired:\n  - namespace\n  - seconds\n  - max_value\n  - conditions\n  - variables\n

    Here is an example of such a limit definition:

    - namespace: example.org\n  max_value: 10\n  seconds: 60\n  conditions:\n    - \"descriptors[0].req_method == 'GET'\"\n  variables:\n    - descriptors[0].user_id\n
    • namespace namespaces the limit, will generally be the domain, see here
    • seconds is the duration for which the limit applies, in seconds: e.g. 60 is a span of time of one minute
    • max_value is the actual limit, e.g. 100 would limit to 100 requests
    • name lets the user optionally name the limit
    • variables is an array of variables, which once resolved, will be used to qualify counters for the limit, e.g. api_key to limit per api keys
    • conditions is an array of conditions, which once evaluated will decide whether to apply the limit or not
    "},{"location":"limitador/doc/server/configuration/#condition-syntax","title":"condition syntax","text":"

    Each condition is an expression producing a boolean value (true or false). All conditions must evaluate to true for the limit to be applied on a request.

    These predicates are CEL Expressions that operate on the context provided by the Limit itself (its id and name fields), along with the descriptors from Envoy's service.ratelimit.v3.RateLimitRequest, each of which is exposed as a List of Map with both keys and values as String.

    "},{"location":"limitador/doc/server/configuration/#counter-storages","title":"Counter storages","text":"

    Limitador will load all the limit definitions from the LIMITS_FILE and keep these in memory. To enforce these limits, Limitador needs to track requests in the form of counters. There would be at least one counter per limit, but that number grows when variables are used to qualify counters per some arbitrary values.

    "},{"location":"limitador/doc/server/configuration/#memory","title":"memory","text":"

    As the name implies, Limitador will keep all counters in memory. This yields the best results in terms of latency as well as accuracy. By default, only up to 1000 \"concurrent\" counters will be kept around, evicting the oldest entries. \"Concurrent\" in this context means counters that need to exist at the \"same time\", based on the period of the limit, as \"expired\" counters are discarded.

    This storage is ephemeral, as if the process is restarted, all the counters are lost and effectively \"reset\" all the limits as if no traffic had been rate limited, which can be fine for short-lived limits, less for longer-lived ones.

    "},{"location":"limitador/doc/server/configuration/#redis","title":"redis","text":"

    When you want persistence of your counters, such as for disaster recovery or across restarts, using redis will store the counters in a redis instance using the provided URL. Increments to individual counters are made within redis itself, providing accuracy over these, though races can occur when multiple Limitador servers are used against a single redis and using \"stacked\" limits (i.e. over different periods). Latency is also impacted, as it results in one additional hop to talk to redis and maintain the counters.

    TLS Support

    Connect to a redis instance using the rediss:// URL scheme.

    To enable insecure mode, append #insecure at the end of the URL. For example:

    limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure\"\n

    Authentication

    To enable authentication, use the username and password properties of the URL scheme. For example:

    limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1\"\n

    when the username is omitted, redis assumes default user. For example:

    limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1\"\n

    Usage

    Uses Redis to store counters\n\nUsage: limitador-server <LIMITS_FILE> redis <URL>\n\nArguments:\n  <URL>  Redis URL to use\n\nOptions:\n  -h, --help  Print help\n
    "},{"location":"limitador/doc/server/configuration/#redis_cached","title":"redis_cached","text":"

    In order to avoid some communication overhead to redis, redis_cached adds an in memory caching layer within the Limitador servers. This lowers the latency, but sacrifices some accuracy as it will not only cache counters, but also coalesce counters updates to redis over time. See this configuration option for more information.

    TLS Support

    Connect to a redis instance using the rediss:// URL scheme.

    To enable insecure mode, append #insecure at the end of the URL. For example:

    limitador-server <LIMITS_FILE> redis rediss://127.0.0.1/#insecure\"\n

    Authentication

    To enable authentication, use the username and password properties of the URL scheme. For example:

    limitador-server <LIMITS_FILE> redis redis://my-username:my-password@127.0.0.1\"\n

    when the username is omitted, redis assumes default user. For example:

    limitador-server <LIMITS_FILE> redis redis://:my-password@127.0.0.1\"\n

    Usage

    Uses Redis to store counters, with an in-memory cache\n\nUsage: limitador-server <LIMITS_FILE> redis_cached [OPTIONS] <URL>\n\nArguments:\n  <URL>  Redis URL to use\n\nOptions:\n      --batch-size <batch>          Size of entries to flush in as single flush [default: 100]\n      --flush-period <flush>        Flushing period for counters in milliseconds [default: 1000]\n      --max-cached <max>            Maximum amount of counters cached [default: 10000]\n      --response-timeout <timeout>  Timeout for Redis commands in milliseconds [default: 350]\n  -h, --help                        Print help\n
    "},{"location":"limitador/doc/server/configuration/#disk","title":"disk","text":"

    Disk storage using RocksDB. Counters are held on disk (persistent).

    Counters are held on disk (persistent)\n\nUsage: limitador-server <LIMITS_FILE> disk [OPTIONS] <PATH>\n\nArguments:\n  <PATH>  Path to counter DB\n\nOptions:\n      --optimize <OPTIMIZE>  Optimizes either to save disk space or higher throughput [default: throughput] [possible values: throughput, disk]\n  -h, --help                 Print help\n

    For an in-depth coverage of the different topologies supported and how they affect the behavior, see the topologies' document.

    "},{"location":"limitador/doc/server/configuration/#configuration-using-environment-variables","title":"Configuration using environment variables","text":"

    The Limitador server has some options that can be configured with environment variables. These will override the default values the server uses. Any argument used when starting the server will prevail over the environment variables.

    "},{"location":"limitador/doc/server/configuration/#envoy_rls_host","title":"ENVOY_RLS_HOST","text":"
    • Host where the Envoy RLS server listens.
    • Optional. Defaults to \"0.0.0.0\".
    • Format: string.
    "},{"location":"limitador/doc/server/configuration/#envoy_rls_port","title":"ENVOY_RLS_PORT","text":"
    • Port where the Envoy RLS server listens.
    • Optional. Defaults to 8081.
    • Format: integer.
    "},{"location":"limitador/doc/server/configuration/#http_api_host","title":"HTTP_API_HOST","text":"
    • Host where the HTTP server listens.
    • Optional. Defaults to \"0.0.0.0\".
    • Format: string.
    "},{"location":"limitador/doc/server/configuration/#http_api_port","title":"HTTP_API_PORT","text":"
    • Port where the HTTP API listens.
    • Optional. Defaults to 8080.
    • Format: integer.
    "},{"location":"limitador/doc/server/configuration/#limits_file","title":"LIMITS_FILE","text":"
    • YAML file that contains the limits to create when Limitador boots. If the limits specified already have counters associated, Limitador will not delete them. Changes to the file will be picked up by the running server.
    • Required. No default
    • Format: string, file path.
    "},{"location":"limitador/doc/server/configuration/#limit_name_in_prometheus_labels","title":"LIMIT_NAME_IN_PROMETHEUS_LABELS","text":"
    • Enables using limit names as labels in Prometheus metrics. This is disabled by default because for a few limits it should be fine, but it could become a problem when defining lots of limits. See the caution note in the Prometheus docs
    • Optional. Disabled by default.
    • Format: bool, set to \"1\" to enable.
    "},{"location":"limitador/doc/server/configuration/#tracing_endpoint","title":"TRACING_ENDPOINT","text":"
    • The endpoint of the OTLP tracing collector (scheme://host:port).
    • Optional. Default to \"\" (tracing disabled)
    • Format: string
    "},{"location":"limitador/doc/server/configuration/#redis_local_cache_enabled","title":"REDIS_LOCAL_CACHE_ENABLED","text":"
    • Enables a storage implementation that uses Redis, but also caches some data in memory. The idea is to improve throughput and latencies by caching the counters in memory to reduce the number of accesses to Redis. To achieve that, this mode sacrifices some rate-limit accuracy. This mode does two things:
      • Batches counter updates. Instead of updating the counters on every request, it updates them in memory and commits them to Redis in batches. The flushing interval can be configured with the REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS env. The trade-off is that when running several instances of Limitador, other instances will not become aware of the counter updates until they're committed to Redis.
      • Caches counters. Instead of fetching the value of a counter every time it's needed, the value is cached for a configurable period. The trade-off is that when running several instances of Limitador, an instance will not become aware of the counter updates other instances do while the value is cached. When a counter is already at 0 (limit exceeded), it's cached until it expires in Redis. In this case, no matter what other instances do, we know that the quota will not be reestablished until the key expires in Redis, so in this case, rate-limit accuracy is not affected. When a counter has still some quota remaining the situation is different, that's why we can tune for how long it will be cached. The formula is as follows: MIN(ttl_in_redis/REDIS_LOCAL_CACHE_TTL_RATIO_CACHED_COUNTERS, REDIS_LOCAL_CACHE_MAX_TTL_CACHED_COUNTERS_MS). For example, let's imagine that the current TTL (time remaining until the limit resets) in Redis for a counter is 10 seconds, and we set the ratio to 2, and the max time to 30s. In this case, the counter will be cached for 5s (min(10/2, 30)). During those 5s, Limitador will not fetch the value of that counter from Redis, so it will answer faster, but it will also miss the updates done by other instances, so it can go over the limits in that 5s interval.
    • Optional. Disabled by default.
    • Format: set to \"1\" to enable.
    • Note: \"REDIS_URL\" needs to be set.
    "},{"location":"limitador/doc/server/configuration/#redis_local_cache_flushing_period_ms","title":"REDIS_LOCAL_CACHE_FLUSHING_PERIOD_MS","text":"
    • Used to configure the maximum flushing period. See REDIS_LOCAL_CACHE_ENABLED. This env only applies when \"REDIS_LOCAL_CACHE_ENABLED\" == 1.
    • Optional. Defaults to 1000.
    • Format: integer. Duration in milliseconds.
    "},{"location":"limitador/doc/server/configuration/#redis_local_cache_batch_size","title":"REDIS_LOCAL_CACHE_BATCH_SIZE","text":"
    • Used to configure the maximum number of counters to update in a flush. See REDIS_LOCAL_CACHE_ENABLED. This env only applies when \"REDIS_LOCAL_CACHE_ENABLED\" == 1.
    • Optional. Defaults to 100.
    • Format: integer.
    "},{"location":"limitador/doc/server/configuration/#redis_url","title":"REDIS_URL","text":"
    • Redis URL. Required only when you want to use Redis to store the limits.
    • Optional. By default, Limitador stores the limits in memory and does not require Redis.
    • Format: string, URL in the format of \"redis://127.0.0.1:6379\".
    "},{"location":"limitador/doc/server/configuration/#rust_log","title":"RUST_LOG","text":"
    • Defines the log level.
    • Optional. Defaults to \"error\".
    • Format: enum: \"debug\", \"error\", \"info\", \"warn\", or \"trace\".
    "},{"location":"limitador/doc/server/configuration/#rate_limit_headers","title":"RATE_LIMIT_HEADERS","text":"
    • Enables rate limit response headers. Only supported by the RLS server.
    • Optional. Defaults to \"NONE\".
    • Must be one of:
    • \"NONE\" - Does not add any additional headers to the http response.
    • \"DRAFT_VERSION_03\". Adds response headers per https://datatracker.ietf.org/doc/id/draft-polli-ratelimit-headers-03.html
    "},{"location":"limitador/limitador/","title":"Limitador (library)","text":"

    An embeddable rate-limiter library supporting in-memory, Redis and disk data stores.

    For the complete documentation of the crate's API, please refer to docs.rs

    "},{"location":"limitador/limitador/#features","title":"Features","text":"
    • redis_storage: support for using Redis as the data storage backend.
    • disk_storage: support for using RocksDB as a local disk storage backend.
    • default: redis_storage.
    "},{"location":"limitador/limitador-server/","title":"Limitador (server)","text":"

    By default, Limitador starts the HTTP server in localhost:8080, and the grpc service that implements the Envoy Rate Limit protocol in localhost:8081. That can be configured with these ENVs: ENVOY_RLS_HOST, ENVOY_RLS_PORT, HTTP_API_HOST, and HTTP_API_PORT.

    Or using the command line arguments:

    Rate Limiting Server\n\nUsage: limitador-server [OPTIONS] <LIMITS_FILE> [STORAGE]\n\nSTORAGES:\n  memory        Counters are held in Limitador (ephemeral)\n  disk          Counters are held on disk (persistent)\n  redis         Uses Redis to store counters\n  redis_cached  Uses Redis to store counters, with an in-memory cache\n\nArguments:\n  <LIMITS_FILE>  The limit file to use\n\nOptions:\n  -b, --rls-ip <ip>\n          The IP to listen on for RLS [default: 0.0.0.0]\n  -p, --rls-port <port>\n          The port to listen on for RLS [default: 8081]\n  -B, --http-ip <http_ip>\n          The IP to listen on for HTTP [default: 0.0.0.0]\n  -P, --http-port <http_port>\n          The port to listen on for HTTP [default: 8080]\n  -l, --limit-name-in-labels\n          Include the Limit Name in prometheus label\n  -v...\n          Sets the level of verbosity\n      --tracing-endpoint <tracing_endpoint>\n          The endpoint for the tracing service\n      --validate\n          Validates the LIMITS_FILE and exits\n  -H, --rate-limit-headers <rate_limit_headers>\n          Enables rate limit response headers [default: NONE] [possible values: NONE, DRAFT_VERSION_03]\n  -h, --help\n          Print help\n  -V, --version\n          Print version\n

    When using environment variables, these will override the defaults, while environment variables are themselves overridden by the command line arguments provided. See the individual STORAGES help for more options relative to each of the storages.

    The OpenAPI spec of the HTTP service is here.

    Limitador has to be started with a YAML file that has some limits defined. There's an example file that allows 10 requests per minute and per user_id when the HTTP method is \"GET\" and 5 when it is a \"POST\". You can run it with Docker (replace latest with the version you want):

    docker run --rm --net=host -it -v $(pwd)/examples/limits.yaml:/home/limitador/my_limits.yaml:ro quay.io/kuadrant/limitador:latest limitador-server /home/limitador/my_limits.yaml\n

    You can also use the YAML file when running locally:

    cargo run --release --bin limitador-server ./examples/limits.yaml\n

    If you want to use Limitador with Envoy, there's a minimal Envoy config for testing purposes here. The config forwards the \"userid\" header and the request method to Limitador. It assumes that there's an upstream API deployed on port 1323. You can use echo, for example.

    Limitador has several options that can be configured via ENV. This doc specifies them.

    "},{"location":"limitador/limitador-server/#limits-storage","title":"Limits storage","text":"

    Limitador can store its limits and counters in-memory, disk or in Redis. In-memory is faster, but the limits are applied per instance. When using Redis, multiple instances of Limitador can share the same limits, but it's slower.

    "},{"location":"limitador/limitador-server/kubernetes/","title":"Kubernetes","text":"

    The purpose of this documentation is to deploy a sample application published via AWS ELB, which will be ratelimited at infrastructure level, thanks to the use of the envoyproxy sidecar container, which will be in charge of contacting a ratelimit service (limitador), which will allow the request (or not) if it is within the permitted limits.

    There are mainly two recommended ways of using limitador in kubernetes:

    1. There is an ingress based on envoyproxy that contacts with limitador ratelimit service before forwarding (or not) the request to the application
    2. There is an envoyproxy sidecar container living in the application pod that contacts with limitador ratelimit service before forwarding (or not) the request to the main application container in the same pod

    This example describes the second scenario (where there is an application with an envoyproxy sidecar container contacting the limitador service).

    NOTE If you don't want to manually manage the sidecar container definitions on your deployments (hardcoding the container spec, loading the envoy configuration from a configmap that requires a pod restart to reload possible configuration changes...), you can use marin3r, a lightweight envoy control plane that allows you to inject envoyproxy sidecar containers and dynamically consume configs from Kubernetes custom resources.

    This is the network diagram of the deployed example:

    "},{"location":"limitador/limitador-server/kubernetes/#components","title":"Components","text":"

    In order to run that ratelimit test, you need to deploy a few components. Some of them are mandatory, and a few are optional:

    "},{"location":"limitador/limitador-server/kubernetes/#mandatory","title":"Mandatory","text":"
    • Application (a sample application deployment called kuard):
    • App has an envoyproxy sidecar container with its configuration file in a configmap, composed by:
      • Cluster kuard points to main application container (127.0.0.1:8080)
      • Cluster kuard_ratelimit points to limitador headless service (limitador:8081)
      • Listener HTTP points to envoyproxy sidecar (0.0.0.0:38080)
      • When envoy contacts the ratelimit service, you can define a timeout, and if there is no response within that timeout (because ratelimit is overloaded taking more time to process the request, or because the ratelimit service is down), you can choose from envoy to deny the request or pass it to the application. In this case, a 1s timeout is set, and if there is no answer in this 1 second, the request is passed to the application (failure_mode_deny: false), so we guarantee that the maximum overhead added by a non working ratelimit service is 1 extra second to the final response time.
    • App service published with type: LoadBalancer, which creates a AWS ELB. This service has an annotation to enable proxy protocol on the AWS Load balancer in order to be able to keep the real client IP at envoy level (instead of the k8s node private IP), so it can be used to ratelimit per each real client IP if desired.

    • Ratelimit application (a deployment called limitador):

    • Limitador Configmap with limits definition (1000 rps per hostname).
    • Limitador headless service published on limitador:8081. It is important to use a headless service in order to balance correctly the traffic between limitador pods, otherwise GRPC connections are not well balanced.

    • Redis database to persist ratelimit configuration:

    • Redis service
    • Redis statefulset with a persistent volume
    "},{"location":"limitador/limitador-server/kubernetes/#optional","title":"Optional","text":"
    • Centos pod:
    • Used to execute hey tool benchmarks from the cluster, so we ensure network latency does not affect the results. Actually, to achieve better results, this pod should be on another cluster (to not share the network between client and server) and be placed on the same Region (to reduce latency). The client could be a bottleneck for the performance test.
    • This centos pod is going to use the public AWS ELB to access the app, simulating a normal client from the same Region
    • Prometheus monitoring and grafana dashboard resources
    "},{"location":"limitador/limitador-server/kubernetes/#k8s-deployment","title":"K8s deployment","text":"
    • Deploy the redis instance that will keep the limits for different limitador pods:

      kubectl apply -f redis-service.yaml\nkubectl apply -f redis-statefulset.yaml\n

    • Deploy limitador application. It is important to create the configmap with limitador limits before the deployment, in order to load it from limitador pods. At the moment, if you update the limits configmap you need to restart the pods. Additionally, limitador has an API in order to load limits dynamically, but for simplicity for this test a configmap has been used:

      kubectl apply -f limitador-config-configmap.yaml\nkubectl apply -f limitador-service.yaml\nkubectl apply -f limitador-deployment.yaml\n

    • Deploy sample kuard application with the envoyproxy sidecar container (if you do any change on the envoy configmap, remember you need to restart app pods in order to reload the config):

      kubectl apply -f kuard-envoy-config-configmap.yaml\nkubectl apply -f kuard-service.yaml\nkubectl apply -f kuard-deployment.yaml\n

    • At this point you should see all pods running, and kuard pods should have 2 containers (the main kuard container, and the envoyproxy sidecar container):

      \u25b6 kubectl get pods\nNAME                         READY   STATUS    RESTARTS   AGE\nkuard-f859bb896-gmzxn        2/2     Running   0          7m\nkuard-f859bb896-z95w8        2/2     Running   0          7m\nlimitador-68d494f54c-qv996   1/1     Running   0          8m\nlimitador-68d494f54c-zzmhn   1/1     Running   0          8m\nredis-0                      1/1     Running   0          9m\n

    • Now you should be able to access to kuard application using the load balancer DNS name:

      \u25b6 kubectl get service kuard\nNAME    TYPE           CLUSTER-IP       EXTERNAL-IP                                                              PORT(S)        AGE\nkuard   LoadBalancer   172.30.117.198   a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com   80:31450/TCP   4m\n

    • If you go to the browser and paste the EXTERNAL-IP, your request will follow the next workflow:

    • The requests will go from your local machine through internet to the public AWS ELB where the app is published
    • Then it will go to the NodePort of your k8s cluster nodes
    • Once on a k8s node, it will go to kuard Service Virtual IP, and will arrive to an envoyproxy sidecar container inside kuard pod
    • Envoyproxy sidecar container will contact with limitador headless Service, to authorize the requests or not:
      • If the request is authorized (within the configured limits), it will send the request to the app container (0.0.0.0:8080) in the same pod, and request will end up with a HTTP 200 response
      • If the request is limited (beyond the limits), request will end up with HTTP 429 response
    "},{"location":"limitador/limitador-server/kubernetes/#monitoring","title":"Monitoring","text":"

    Both envoyproxy sidecar and limitador applications include built-in prometheus metrics.

    "},{"location":"limitador/limitador-server/kubernetes/#prometheus","title":"Prometheus","text":"

    In order to scrape those metrics with a prometheus-operator deployed in the cluster, you need to create a PodMonitor resource for every application:

    kubectl apply -f kuard-podmonitor.yaml\nkubectl apply -f limitador-podmonitor.yaml\n

    "},{"location":"limitador/limitador-server/kubernetes/#grafana-dashboard","title":"Grafana dashboard","text":"

    Then, if you have grafana deployed in the cluster, you can import a Kuadrant Limitador grafana dashboard that we have prepared, which includes:

    • Kuard envoyproxy sidecar metrics (globally and per pod)
    • Limitador metrics (globally and per pod)
    • And for every deployed component (limitador, kuard, redis):
    • Number of pods (total, available, unavailable, pod restarts...)
    • CPU usage per pod
    • Memory usage per pod
    • Network usage per pod
    "},{"location":"limitador/limitador-server/kubernetes/#benchmarking","title":"Benchmarking","text":"
    • In order to check that the ratelimit is working as expected, you can use any benchmarking tool, like hey
    • If you want, you can use a centos pod (better to create it on a different cluster within the same Region):
      kubectl apply -f centos-pod.yaml\n
    • Connect to centos pod:
      kubectl exec --stdin --tty centos -- /bin/bash\n
    • And install hey with:
      [root@centos /]# curl -sf https://gobinaries.com/rakyll/hey | sh\n
    • Now you can execute the benchmark using the following scenario:
    Item Value Target AWS ELB DNS Name App pods 2 Limitador pods 2 Limits 1.000 rps per hostname Hey duration 1 minute Hey Traffic -c 60 -q 20 (around 1.200 rps)
    • Theoretically:
    • It should let pass 1.000 requests, and limit 200 requests per second
    • It should let pass 60 * 1.000 = 60.000 requests, and limit 60 * 200 = 12.000 requests per minute
    • Each limitador pod should handle half of the traffic (500 rps OK, and 200 rps limited)
    [root@centos /]# hey -z 60s -c 60 -q 20 \"http://a96d5449fbc3f4cd892e15e5b36cde48-457963259.us-east-1.elb.amazonaws.com\"\n\nSummary:\n  Total:    60.0131 secs\n  Slowest:  0.1028 secs\n  Fastest:  0.0023 secs\n  Average:  0.0075 secs\n  Requests/sec: 1199.3721\n\n  Total data:   106581650 bytes\n  Size/request: 1480 bytes\n\nResponse time histogram:\n  0.002 [1] |\n  0.012 [70626] |\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\u25a0\n  0.022 [1291]  |\u25a0\n  0.032 [0] |\n  0.043 [0] |\n  0.053 [0] |\n  0.063 [0] |\n  0.073 [0] |\n  0.083 [0] |\n  0.093 [23]    |\n  0.103 [37]    |\n\n\nLatency distribution:\n  10% in 0.0053 secs\n  25% in 0.0063 secs\n  50% in 0.0073 secs\n  75% in 0.0085 secs\n  90% in 0.0096 secs\n  95% in 0.0102 secs\n  99% in 0.0139 secs\n\nDetails (average, fastest, slowest):\n  DNS+dialup:   0.0001 secs, 0.0023 secs, 0.1028 secs\n  DNS-lookup:   0.0001 secs, 0.0000 secs, 0.0711 secs\n  req write:    0.0000 secs, 0.0000 secs, 0.0014 secs\n  resp wait:    0.0074 secs, 0.0023 secs, 0.0303 secs\n  resp read:    0.0000 secs, 0.0000 secs, 0.0049 secs\n\nStatus code distribution:\n  [200] 60046 responses\n  [429] 11932 responses\n
    • We can see that:

      • Client could send 1199.3721rps (about 1200rps)
      • 60046 requests (about 60000) were OK (HTTP 200)
      • 11932 requests (about 12000) were limited (HTTP 429)
      • Average latency (since the request goes out from the client to AWS ELB, k8s node, envoyproxy container, limitador+redis, kuar app container) is 10ms
    • In addition, if we do a longer test with 5 minutes traffic for example, you can check with the grafana dashboard how these requests are processed by envoyproxy sidecar container of kuard pods and limitador pods:

      • Kuard Envoyproxy Sidecar Metrics:
        • Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
        • Each envoyproxy sidecar of each kuard pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is not 100% perfect, caused by random iptables forwarding when using a k8s service
      • Limitador Metrics:
        • Globally it handles around 1200rps: it permits around 1krps and limits around 200rps
        • Each limitador pod handles around half of the traffic: it permits around 500rps and limits around 100rps. The balance between pods is perfect thanks to using a headless service with GRPC connections
    "},{"location":"limitador/limitador-server/sandbox/","title":"Index","text":""},{"location":"limitador/limitador-server/sandbox/#testing-environment","title":"Testing Environment","text":""},{"location":"limitador/limitador-server/sandbox/#requirements","title":"Requirements","text":"
    • docker v24+
    "},{"location":"limitador/limitador-server/sandbox/#setup","title":"Setup","text":"

    Clone the project

    git clone https://github.com/Kuadrant/limitador.git\ncd limitador/limitador-server/sandbox\n

    Check out make help for all the targets.

    "},{"location":"limitador/limitador-server/sandbox/#deployment-options","title":"Deployment options","text":"Limitador's configuration Command Info In-memory configuration make deploy-in-memory Counters are held in Limitador (ephemeral) Redis make deploy-redis Uses Redis to store counters Redis Secured make deploy-redis-tls Uses Redis with TLS and password protected to store counters Redis Cached make deploy-redis-cached Uses Redis to store counters, with an in-memory cache Redis Otel Instrumented make deploy-redis-otel Uses redis to store counters, instrumented with opentelemetry Disk make deploy-disk Uses disk to store counters Distributed make deploy-distributed Counters are held in Limitador (ephemeral) but replicated to other Limitador servers.

    | Distributed 3 Node | make deploy-distributed-3-node | Counters are held in Limitador (ephemeral) but replicated to 3 other Limitador servers. |

    "},{"location":"limitador/limitador-server/sandbox/#running-multi-node-distributed-deployments","title":"Running Multi Node Distributed Deployments","text":"

    The make deploy-distributed target can be connected to other Limitador servers but requires you to set the PEER_ID and PEER_URLS environment variables when you run the target.

    If you have 3 servers you want to replicate between, you would run the following commands:

    # on server where: hostname=server1\nPEER_ID=`hostname` PEER_URLS=\"http://server2:15001 http://server3:15001\" make deploy-distributed\n
    # on server where: hostname=server2\nPEER_ID=`hostname` PEER_URLS=\"http://server1:15001 http://server3:15001\" make deploy-distributed\n
    # on server where: hostname=server3\nPEER_ID=`hostname` PEER_URLS=\"http://server1:15001 http://server2:15001\" make deploy-distributed\n

    The PEER_ID just needs to be unique between the servers, and the PEER_URLS should be a space-separated list of the other servers' URLs.

    "},{"location":"limitador/limitador-server/sandbox/#limitadors-admin-http-endpoint","title":"Limitador's admin HTTP endpoint","text":"

    Limits

    curl -i http://127.0.0.1:18080/limits/test_namespace\n

    Counters

    curl -i http://127.0.0.1:18080/counters/test_namespace\n

    Metrics

    curl -i http://127.0.0.1:18080/metrics\n
    "},{"location":"limitador/limitador-server/sandbox/#limitadors-grpc-ratelimitservice-endpoint","title":"Limitador's GRPC RateLimitService endpoint","text":"

    Get grpcurl. You need Go SDK installed.

    Golang version >= 1.18 (from fullstorydev/grpcurl)

    make grpcurl\n

    Inspect RateLimitService GRPC service

    bin/grpcurl -plaintext 127.0.0.1:18081 describe envoy.service.ratelimit.v3.RateLimitService\n

    Make a custom request

    bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM\n{\n    \"domain\": \"test_namespace\",\n    \"hits_addend\": 1,\n    \"descriptors\": [\n        {\n            \"entries\": [\n                {\n                    \"key\": \"req.method\",\n                    \"value\": \"POST\"\n                },\n                {\n                    \"key\": \"req.path\",\n                    \"value\": \"/\"\n                }\n            ]\n        }\n    ]\n}\nEOM\n

    Do repeated requests. As the limit is set to max 5 request for 60 seconds, you should see OVER_LIMIT response after 5 requests.

    while :; do bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM; sleep 1; done\n{\n    \"domain\": \"test_namespace\",\n    \"hits_addend\": 1,\n    \"descriptors\": [\n        {\n            \"entries\": [\n                {\n                    \"key\": \"req.method\",\n                    \"value\": \"POST\"\n                },\n                {\n                    \"key\": \"req.path\",\n                    \"value\": \"/\"\n                }\n            ]\n        }\n    ]\n}\nEOM\n
    "},{"location":"limitador/limitador-server/sandbox/#downstream-traffic","title":"Downstream traffic","text":"

    Upstream service implemented by httpbin.org

    curl -i -H \"Host: example.com\" http://127.0.0.1:18000/get\n
    "},{"location":"limitador/limitador-server/sandbox/#load-testing-the-grpc-ratelimitservice-directly","title":"Load Testing the GRPC RateLimitService directly","text":"

    This load test will use grpcurl. You need Go SDK installed.

    Run a load test a 5000 requests per second (RPS) for 10 seconds:

    RPS=5000 make load-test\n
    "},{"location":"limitador/limitador-server/sandbox/#load-testing-via-envoy-proxy","title":"Load Testing via Envoy Proxy","text":"
    cargo run --manifest-path loadtest/Cargo.toml  --package loadtest --release -- --report-file=report.htm\n

    The report will be saved in report.htm file.

    "},{"location":"limitador/limitador-server/sandbox/#limitador-image","title":"Limitador Image","text":"

    By default, the sandbox will run Limitador's limitador-testing:latest image.

    Building limitador-testing:latest image

    You can easily build the limitador's image from the current workspace code base with:

    make build\n

    The image will be tagged with limitador-testing:latest

    Using custom Limitador's image

    The LIMITADOR_IMAGE environment variable overrides the default image. For example:

    make deploy-in-memory LIMITADOR_IMAGE=quay.io/kuadrant/limitador:latest\n
    "},{"location":"limitador/limitador-server/sandbox/#clean-env","title":"Clean env","text":"
    make clean\n
    "},{"location":"limitador/limitador-server/sandbox/redis-otel/","title":"Limitador instrumentation sandbox","text":"

    Limitador is configured to push traces to an opentelemetry collector.

    "},{"location":"limitador/limitador-server/sandbox/redis-otel/#run-sandbox","title":"Run sandbox","text":"
    make build\nmake deploy-redis-otel\n
    "},{"location":"limitador/limitador-server/sandbox/redis-otel/#run-some-traffic","title":"Run some traffic","text":"
    make grpcurl\n
    bin/grpcurl -plaintext -d @ 127.0.0.1:18081 envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit <<EOM\n{\n    \"domain\": \"test_namespace\",\n    \"hits_addend\": 1,\n    \"descriptors\": [\n        {\n            \"entries\": [\n                {\n                    \"key\": \"req.method\",\n                    \"value\": \"POST\"\n                }\n            ]\n        }\n    ]\n}\nEOM\n
    "},{"location":"limitador/limitador-server/sandbox/redis-otel/#see-the-trace-in-ui","title":"See the trace in UI","text":"
    firefox -private-window \"localhost:16686\"\n

    Recommended to start looking at check_and_update operation.

    "},{"location":"limitador/limitador-server/sandbox/redis-otel/#tear-down-sandbox","title":"Tear down sandbox","text":"
    make clean\n
    "},{"location":"limitador/limitador-server/sandbox/redis-tls/","title":"Index","text":""},{"location":"limitador/limitador-server/sandbox/redis-tls/#testing-redis-security","title":"Testing redis security","text":"

    Execute bash shell in redis pod

    docker compose -p sandbox exec redis /bin/bash\n

    Connect to this Redis server with redis-cli:

    root@e024a29b74ba:/data# redis-cli --tls --cacert /usr/local/etc/redis/certs/ca.crt -a foobared\n
    "},{"location":"limitador-operator/","title":"Limitador Operator","text":""},{"location":"limitador-operator/#overview","title":"Overview","text":"

    The Operator to manage Limitador deployments.

    "},{"location":"limitador-operator/#customresourcedefinitions","title":"CustomResourceDefinitions","text":"
    • Limitador, which defines a desired Limitador deployment.
    "},{"location":"limitador-operator/#limitador-crd","title":"Limitador CRD","text":"

    Limitador v1alpha1 API reference

    Example:

    ---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  listener:\n    http:\n      port: 8080\n    grpc:\n      port: 8081\n  limits:\n\n    - conditions: [\"get_toy == 'yes'\"]\n      max_value: 2\n      namespace: toystore-app\n      seconds: 30\n      variables: []\n
    "},{"location":"limitador-operator/#features","title":"Features","text":"
    • Storage Options
    • Rate Limit Headers
    • Logging
    • Tracing
    • Custom Image
    "},{"location":"limitador-operator/#contributing","title":"Contributing","text":"

    The Development guide describes how to build the operator and how to test your changes before submitting a patch or opening a PR.

    Join us on the #kuadrant channel in the Kubernetes Slack workspace, for live discussions about the roadmap and more.

    "},{"location":"limitador-operator/#licensing","title":"Licensing","text":"

    This software is licensed under the Apache 2.0 license.

    See the LICENSE and NOTICE files that should have been provided along with this software for details.

    "},{"location":"limitador-operator/doc/custom-image/","title":"Custom Image","text":"

    Currently, the limitador image being used in the deployment is read from different sources with some order of precedence:

    • If Limitador CR's spec.image is set -> image = ${spec.image}
    • If Limitador CR's spec.version is set -> image = quay.io/kuadrant/limitador:${spec.version} (note the repo is hardcoded)
    • if RELATED_IMAGE_LIMITADOR env var is set -> image = $RELATED_IMAGE_LIMITADOR
    • else: hardcoded to quay.io/kuadrant/limitador:latest

    The spec.image field is not meant to be used in production environments. It is meant to be used for dev/testing purposes. The main drawback of the spec.image usage is that upgrades cannot be supported as the limitador operator cannot ensure the operation to be safe.

    ---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-instance-1\nspec:\n  image: example.com/myorg/limitador-repo:custom-image-v1\nEOF\n
    "},{"location":"limitador-operator/doc/custom-image/#pull-an-image-from-a-private-registry","title":"Pull an Image from a Private Registry","text":"

    To pull an image from a private container image registry or repository, you need to provide credentials.

    Create a Secret of type kubernetes.io/dockerconfigjson by providing credentials. For example, using kubectl tool with the following command line:

    kubectl create secret docker-registry regcred --docker-server=<your-registry-server> --docker-username=<your-name> --docker-password=<your-pword>\n

    That will create a secret named regcred.

    Deploy limitador instance with the imagePullSecrets field having a reference to the regcred.

    ---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-instance-1\nspec:\n  image: example.com/myorg/limitador-repo:custom-image-v1\n  imagePullSecrets:\n\n  - name: regcred\n

    NOTE: It is mandatory that the secret and limitador CR are created in the same namespace.

    "},{"location":"limitador-operator/doc/development/","title":"Development Guide","text":""},{"location":"limitador-operator/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":"
    • operator-sdk version 1.32.0
    • kind version v0.22.0
    • git
    • go version 1.21+
    • kubernetes version v1.25+
    • kubectl version v1.25+
    "},{"location":"limitador-operator/doc/development/#build","title":"Build","text":"
    make\n
    "},{"location":"limitador-operator/doc/development/#run-locally","title":"Run locally","text":"

    You need an active session open to a kubernetes cluster.

    Optionally, run kind with local-env-setup.

    make local-env-setup\n

    Then, run the operator locally

    make run\n
    "},{"location":"limitador-operator/doc/development/#deploy-the-operator-in-a-deployment-object","title":"Deploy the operator in a deployment object","text":"
    make local-setup\n
    "},{"location":"limitador-operator/doc/development/#deploy-the-operator-using-olm","title":"Deploy the operator using OLM","text":"

    You can deploy the operator using OLM just running a few commands. No need to build any image. Kuadrant engineering team provides latest and released version tagged images. They are available in the Quay.io/Kuadrant image repository.

    Create kind cluster

    make kind-create-cluster\n

    Deploy OLM system

    make install-olm\n

    Deploy the operator using OLM. The make deploy-catalog target accepts the following variables:

    Makefile Variable Description Default value CATALOG_IMG Catalog image URL quay.io/kuadrant/limitador-operator-catalog:latest
    make deploy-catalog [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
    "},{"location":"limitador-operator/doc/development/#build-custom-olm-catalog","title":"Build custom OLM catalog","text":"

    If you want to deploy (using OLM) a custom limitador operator, you need to build your own catalog.

    "},{"location":"limitador-operator/doc/development/#build-operator-bundle-image","title":"Build operator bundle image","text":"

    The make bundle target accepts the following variables:

    Makefile Variable Description Default value Notes IMG Operator image URL quay.io/kuadrant/limitador-operator:latest VERSION Bundle version 0.0.0 RELATED_IMAGE_LIMITADOR Limitador bundle URL quay.io/kuadrant/limitador:latest LIMITADOR_VERSION var could be use to build this URL providing the tag CHANNELS Bundle channels used in the bundle, comma separated alpha DEFAULT_CHANNEL The default channel used in the bundle alpha
    • Build the bundle manifests
    make bundle [IMG=quay.io/kuadrant/limitador-operator:latest] \\\n            [VERSION=0.0.0] \\\n            [RELATED_IMAGE_LIMITADOR=quay.io/kuadrant/limitador:latest] \\\n            [CHANNELS=alpha] \\\n            [DEFAULT_CHANNEL=alpha]\n
    • Build the bundle image from the manifests
    Makefile Variable Description Default value BUNDLE_IMG Operator bundle image URL quay.io/kuadrant/limitador-operator-bundle:latest
    make bundle-build [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
    • Push the bundle image to a registry
    Makefile Variable Description Default value BUNDLE_IMG Operator bundle image URL quay.io/kuadrant/limitador-operator-bundle:latest
    make bundle-push [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n
    "},{"location":"limitador-operator/doc/development/#build-custom-catalog","title":"Build custom catalog","text":"

    The catalog format will be File-based Catalog.

    Make sure all the required bundles are pushed to the registry. It is required by the opm tool.

    The make catalog target accepts the following variables:

    Makefile Variable Description Default value BUNDLE_IMG Operator bundle image URL quay.io/kuadrant/limitador-operator-bundle:latest DEFAULT_CHANNEL Catalog default channel alpha
    make catalog [BUNDLE_IMG=quay.io/kuadrant/limitador-operator-bundle:latest] [DEFAULT_CHANNEL=alpha]\n
    • Build the catalog image from the manifests
    Makefile Variable Description Default value CATALOG_IMG Operator catalog image URL quay.io/kuadrant/limitador-operator-catalog:latest
    make catalog-build [CATALOG_IMG=quay.io/kuadrant/limitador-operator-catalog:latest]\n
    • Push the catalog image to a registry
    make catalog-push [CATALOG_IMG=quay.io/kuadrant/limitador-operator-bundle:latest]\n

    You can try out your custom catalog image following the steps of the Deploy the operator using OLM section.

    "},{"location":"limitador-operator/doc/development/#cleaning-up","title":"Cleaning up","text":"
    make local-cleanup\n
    "},{"location":"limitador-operator/doc/development/#run-tests","title":"Run tests","text":""},{"location":"limitador-operator/doc/development/#unittests","title":"Unittests","text":"
    make test-unit\n

    Optionally, add TEST_NAME makefile variable to run specific test

    make test-unit TEST_NAME=TestConstants\n

    or even subtest

    make test-unit TEST_NAME=TestLimitIndexEquals/empty_indexes_are_equal\n
    "},{"location":"limitador-operator/doc/development/#integration-tests","title":"Integration tests","text":"

    You need an active session open to a kubernetes cluster.

    Optionally, run local cluster with kind

    make local-env-setup\n

    Run integration tests

    make test-integration\n
    "},{"location":"limitador-operator/doc/development/#all-tests","title":"All tests","text":"

    You need an active session open to a kubernetes cluster.

    Optionally, run local cluster with kind

    make local-env-setup\n

    Run all tests

    make test\n
    "},{"location":"limitador-operator/doc/development/#lint-tests","title":"Lint tests","text":"
    make run-lint\n
    "},{"location":"limitador-operator/doc/development/#uninstall-limitador-crd","title":"(Un)Install Limitador CRD","text":"

    You need an active session open to a kubernetes cluster.

    Remove CRDs

    make uninstall\n
    "},{"location":"limitador-operator/doc/logging/","title":"Logging","text":"

    The limitador operator outputs 3 levels of log messages: (from lowest to highest level)

    1. debug
    2. info (default)
    3. error

    info logging is restricted to high-level information. Actions like creating, deleting or updating kubernetes resources will be logged with reduced details about the corresponding objects, and without any further detailed logs of the steps in between, except for errors.

    Only debug logging will include processing details.

    To configure the desired log level, set the environment variable LOG_LEVEL to one of the supported values listed above. Default log level is info.

    Apart from log level, the controller can output messages to the logs in 2 different formats:

    • production (default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
    • development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}

    To configure the desired log mode, set the environment variable LOG_MODE to one of the supported values listed above. Default log mode is production.

    "},{"location":"limitador-operator/doc/rate-limit-headers/","title":"Rate Limit Headers","text":"

    It enables RateLimit Header Fields for HTTP as specified in Rate Limit Headers Draft

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  rateLimitHeaders: DRAFT_VERSION_03\n

    Current valid values are:

    • DRAFT_VERSION_03 (ref: Rate Limit Headers Draft)
    • NONE

    By default, when spec.rateLimitHeaders is null, --rate-limit-headers command line arg is not included in the limitador's deployment.

    "},{"location":"limitador-operator/doc/resource-requirements/","title":"Resource Requirements","text":"

    The default resource requirement for Limitador deployments is specified in Limitador v1alpha1 API reference and will be applied if the resource requirement is not set in the spec.

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  listener:\n    http:\n      port: 8080\n    grpc:\n      port: 8081\n  limits:\n\n    - conditions: [\"get_toy == 'yes'\"]\n      max_value: 2\n      namespace: toystore-app\n      seconds: 30\n      variables: []  \n
    Field json/yaml field Type Required Default value Description ResourceRequirements resourceRequirements *corev1.ResourceRequirements No {\"limits\": {\"cpu\": \"500m\",\"memory\": \"64Mi\"},\"requests\": {\"cpu\": \"250m\",\"memory\": \"32Mi\"}} Limitador deployment resource requirements"},{"location":"limitador-operator/doc/resource-requirements/#example-with-resource-limits","title":"Example with resource limits","text":"

    The resource requests and limits for the deployment can be set like the following:

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  listener:\n    http:\n      port: 8080\n    grpc:\n      port: 8081\n  limits:\n\n    - conditions: [\"get_toy == 'yes'\"]\n      max_value: 2\n      namespace: toystore-app\n      seconds: 30\n      variables: []\n  resourceRequirements:\n    limits:\n      cpu: 200m\n      memory: 400Mi\n    requests:\n      cpu: 101m  \n      memory: 201Mi    \n

    To specify the deployment without resource requests or limits, set an empty struct {} to the field:

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  listener:\n    http:\n      port: 8080\n    grpc:\n      port: 8081\n  limits:\n\n    - conditions: [ \"get_toy == 'yes'\" ]\n      max_value: 2\n      namespace: toystore-app\n      seconds: 30\n      variables: []\n  resourceRequirements: {}\n

    "},{"location":"limitador-operator/doc/storage/","title":"Storage","text":"

    Limitador limits counters are stored in a backend storage. This is in contrast to the storage of the limits themselves, which are always stored in ephemeral memory. Limitador's operator supports several storage configurations:

    • In-Memory: ephemeral and cannot be shared
    • Redis: Persistent (depending on the redis storage configuration) and can be shared
    • Redis Cached: Persistent (depending on the redis storage configuration) and can be shared
    • Disk: Persistent (depending on the underlying disk persistence capabilities) and cannot be shared
    "},{"location":"limitador-operator/doc/storage/#in-memory","title":"In-Memory","text":"

    Counters are held in Limitador (ephemeral)

    In-Memory is the default option defined by the Limitador's Operator.

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage: null\n

    For any of those, one should store the URL of the Redis service, inside a K8s opaque Secret.

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: redisconfig\nstringData:\n  URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n
    "},{"location":"limitador-operator/doc/storage/#redis","title":"Redis","text":"

    Uses Redis to store counters.

    Selected when spec.storage.redis is not null.

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage:\n    redis:\n      configSecretRef: # The secret reference storing the URL for Redis\n        name: redisconfig\n

    The URL of the Redis service is provided inside a K8s opaque Secret. The secret is required to be in the same namespace as the Limitador CR.

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: redisconfig\nstringData:\n  URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n

    Note: Limitador's Operator will only read the URL field of the secret.

    "},{"location":"limitador-operator/doc/storage/#redis-cached","title":"Redis Cached","text":"

    Uses Redis to store counters, with an in-memory cache.

    Selected when spec.storage.redis-cached is not null.

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage:\n    redis-cached:\n      configSecretRef: # The secret reference storing the URL for Redis\n        name: redisconfig\n

    The URL of the Redis service is provided inside a K8s opaque Secret. The secret is required to be in the same namespace as the Limitador CR.

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: redisconfig\nstringData:\n  URL: redis://127.0.0.1/a # Redis URL of its running instance\ntype: Opaque\n

    Note: Limitador's Operator will only read the URL field of the secret.

    Additionally, caching options can be specified in the spec.storage.redis-cached.options field.

    "},{"location":"limitador-operator/doc/storage/#options","title":"Options","text":"Option Description batch-size Size of entries to flush in a single flush [default: 100] flush-period Flushing period for counters in milliseconds [default: 1000] max-cached Maximum amount of counters cached [default: 10000] response-timeout Timeout for Redis commands in milliseconds [default: 350]

    For example:

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage:\n    redis-cached:\n      configSecretRef: # The secret reference storing the URL for Redis\n        name: redisconfig\n      options: # Every option is optional\n        batch-size: 50\n        max-cached: 5000\n
    "},{"location":"limitador-operator/doc/storage/#disk","title":"Disk","text":"

    Counters are held on disk (persistent). Kubernetes Persistent Volumes will be used to store counters.

    Selected when spec.storage.disk is not null.

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage:\n    disk: {}\n

    Additionally, disk options can be specified in the spec.storage.disk.persistentVolumeClaim and spec.storage.disk.optimize fields.

    "},{"location":"limitador-operator/doc/storage/#persistent-volume-claim-options","title":"Persistent Volume Claim Options","text":"

    spec.storage.disk.persistentVolumeClaim field is an object with the following fields.

    Field Description storageClassName StorageClass of the storage offered by cluster administrators [default: default storage class of the cluster] resources The minimum resources the volume should have. Resources will not take any effect when VolumeName is provided. This parameter is not updateable when the underlying PV is not resizable. [default: 1Gi] volumeName The binding reference to the existing PersistentVolume backing this claim [default: null]

    Example:

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage:\n    disk:\n      persistentVolumeClaim:\n        storageClassName: \"customClass\"\n        resources:\n          requests: 2Gi\n
    "},{"location":"limitador-operator/doc/storage/#optimize","title":"Optimize","text":"

    Defines the valid optimization option of the disk persistence type.

    spec.storage.disk.optimize field is a string type with the following valid values:

    Option Description throughput Optimizes for higher throughput. Default disk Optimizes for disk usage

    Example:

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  storage:\n    disk:\n      optimize: disk\n
    "},{"location":"limitador-operator/doc/tracing/","title":"Tracing","text":"

    Limitador offers distributed tracing enablement using the .spec.tracing CR configuration:

    ---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador-sample\nspec:\n  listener:\n    http:\n      port: 8080\n    grpc:\n      port: 8081\n  limits:\n\n    - conditions: [\"get_toy == 'yes'\"]\n      max_value: 2\n      namespace: toystore-app\n      seconds: 30\n      variables: []\n  verbosity: 3\n  tracing:\n    endpoint: rpc://my-otlp-collector:4317\n

    Currently limitador only supports collectors using the OpenTelemetry Protocol with TLS disabled. The endpoint configuration option should contain the scheme, host and port of the service. The quantity and level of the information provided by the spans is configured via the verbosity argument.

    "},{"location":"architecture/docs/design/architectural-overview-v1/","title":"Kuadrant Architectural Overview","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#overview","title":"Overview","text":"

    Kuadrant provides connectivity, security and service protection capabilities in both a single and multi-cluster environment. It exposes these capabilities in the form of Kubernetes CRDs that implement the Gateway API concept of policy attachment. These policy APIs can target specific Gateway API resources such as Gateways and HTTPRoutes to extend their capabilities and configuration. They enable platform engineers to secure, protect and connect their infrastructure and allow application developers to self service and refine policies to their specific needs in order to protect exposed endpoints.

    "},{"location":"architecture/docs/design/architectural-overview-v1/#key-architectural-areas","title":"Key Architectural Areas","text":"
    • Kuadrant architecture is defined and implemented with both control plane and data plane components.
    • The control plane is where policies are exposed and expressed as Kubernetes APIs and reconciled by a policy controller.
    • The data plane is where Kuadrant's \"policy enforcement\" components exist. These components are configured by the control plane and integrate either directly with the Gateway provider or via external integrations.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#10000m-architecture","title":"10000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"

    The control plane is a set of controllers and operators that are responsible for installation and configuration of other components such as the data plane enforcement components and configuration of the Gateway to enable the data plane components to interact with incoming requests. The control plane also owns and reconciles the policy CRD APIs into more complex and specific configuration objects that the policy enforcement components consume in order to know the rules to apply to incoming requests or the configuration to apply to external integrations such as DNS and ACME providers.

    "},{"location":"architecture/docs/design/architectural-overview-v1/#kuadrant-operator","title":"Kuadrant Operator","text":"
    • Installation and configuration of other control plane components
    • Installation of data plane policy enforcement components via their respective control plane operators
    • Configures the Gateway via WASM plugin and other APIs to leverage the data plane components for auth and rate limiting on incoming requests.
    • Exposes RateLimitPolicy , AuthPolicy, DNSPolicy and TLSPolicy and reconciles these into enforceable configuration for the data plane.
    • Exposes Kuadrant and reconciles this to configure and trigger installation of the required data plane components and other control plane components.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#limitador-operator","title":"Limitador Operator:","text":"
    • Installs and configures the Limitador data plane component based on the Limitador CR. Limits specified in the limitador CR are mounted via configmap into the limitador component.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#authorino-operator","title":"Authorino Operator:","text":"
    • Installs and configures the Authorino data plane component based on the Authorino CR.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#cert-manager","title":"Cert-Manager:","text":"
    • Manages TLS certificates for our components and for the Gateways. Consumes Certificate resources created by Kuadrant operator in response to the TLSPolicy.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#dns-operator","title":"DNS Operator","text":"
    • DNS operator consumes DNSRecord resources that are configured via the DNSPolicy api and applies them into the targeted cloud DNS provider. AWS, Azure and Google DNS are our main targets
    "},{"location":"architecture/docs/design/architectural-overview-v1/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"

    The data plane components sit in the request flow and are responsible for enforcing configuration defined by policy and providing service protection capabilities based on configuration managed and created by the control plane.

    "},{"location":"architecture/docs/design/architectural-overview-v1/#limitador","title":"Limitador","text":"
    • Complies with the Envoy rate limiting API to provide rate limiting to the gateway. Consumes limits from a configmap created based on the RateLimitPolicy API.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#authorino","title":"Authorino","text":"
    • Complies with the Envoy external auth API to provide auth integration to the gateway. It provides both Authn and Authz. Consumes AuthConfigs created by the kuadrant operator based on the defined AuthPolicy API.
    "},{"location":"architecture/docs/design/architectural-overview-v1/#wasm-shim","title":"WASM Shim","text":"
    • Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador (for request time enforcement of rate limiting) and Authorino (for request time enforcement of authentication & authorization).
    "},{"location":"architecture/docs/design/architectural-overview-v1/#single-cluster-layout","title":"Single Cluster Layout","text":"

    In a single cluster, you have the Kuadrant control plane and data plane sitting together. It is configured to integrate with Gateways on the same cluster and configure a DNS zone via a DNS provider secret (configured alongside a DNSPolicy). Storage of rate limit counters is possible but not required as they are not being shared.

    "},{"location":"architecture/docs/design/architectural-overview-v1/#multi-cluster","title":"Multi-Cluster","text":"

    In the default multi-cluster setup. Each individual cluster has Kuadrant installed. Each of these clusters are unaware of the other. They are effectively operating as single clusters. The multi-cluster aspect is created by sharing access with the DNS zone, using a shared host across the clusters and leveraging shared counter storage. The zone is operated on independently by each of DNS operator on both clusters to form a single cohesive record set. More details on this can be found in the following RFC. The rate limit counters can also be shared and used by different clusters in order to provide global rate limiting. This is achieved by connecting each instance of Limitador to a shared data store that uses the Redis protocol.

    Shown above is a multi-cluster multi ingress gateway topology. This might be used to support a geographically distributed system for example. However, it is also possible to leverage overlay networking tools such as Skupper that integrate at the Kubernetes service level to have a single gateway cluster that then integrates with multiple backends (on different clusters or in custom infrastructure).

    "},{"location":"architecture/docs/design/architectural-overview-v1/#observability","title":"Observability","text":"

    The Kuadrant architecture is intended to work with some popular monitoring tools for tracing, metrics and log aggregation. Those tools are:

    • Prometheus for scraping metrics - and optionally Thanos for high availability & federation
    • Loki for log aggregation - via log collectors like vector
    • Tempo for trace collecting
    • Grafana for visualising the above

    Depending on the number of clusters in your configuration, you may decide to have a monitoring system on the same cluster as workloads, or in a separate cluster completely. Below are 2 example architectures based on the single cluster and multi cluster layouts. In the single cluster architecture, the collector components (Prometheus, Vector and Tempo) are in the same cluster as the log aggregation (Loki) and visualisation component (Grafana).

    In the multi cluster architecture, the collectors that scrape metrics or logs (Prometheus & Vector) are deployed alongside the workloads in each cluster. However, as traces are sent to a collector (Tempo) from each component, it can be centralised in a separate cluster. Thanos is used in this architecture so that each prometheus can federate metrics back to a central location. The log collector (vector) can forward logs to a central loki instance. Finally, the visualisation component (Grafana) is centralised as well, with data sources configured for each of the 3 components on the same cluster.

    "},{"location":"architecture/docs/design/architectural-overview-v1/#dependencies","title":"Dependencies","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#istio-or-envoy-gateway","title":"Istio or Envoy Gateway:","text":"
    • Gateway API provider that Kuadrant integrates with via WASM to provide service protection capabilities. Kuadrant configures Envoy Proxy via the Istio/Envoy Gateway control plane in order to enforce the applied policies and register components such as Authorino and Limitador.
    • Used by RateLimitPolicy and AuthPolicy
    "},{"location":"architecture/docs/design/architectural-overview-v1/#gateway-api-required","title":"Gateway API: Required","text":"
    • New standard for Ingress from the Kubernetes community
    • Gateway API is the core API that Kuadrant integrates with.
    "},{"location":"architecture/docs/design/architectural-overview/","title":"Kuadrant Architectural Overview [Draft]","text":""},{"location":"architecture/docs/design/architectural-overview/#overview","title":"Overview","text":"

    It is important to note that Kuadrant is not in itself a gateway provider. Kuadrant provides a set of valuable policy APIs that enhance Gateway API via its defined policy attachment extension point. The policy APIs are reconciled by a set of policy controllers and enforced via integration at different points to configure, enhance and secure the application connectivity provided via Gateway API and the underlying gateway provider. These policy extensions are focused around areas such as DNS management supporting global load balancing and health checks, alongside service protection specific APIs such as rate limiting and auth. Kuadrant also integrates with Open Cluster Management as a multi-cluster control plane to enable defining and distributing Gateways across multiple clusters, providing load balancing and tls management for these distributed gateways. These integrations and features can be managed centrally in a declarative way from the Open Cluster Management Hub using Kubernetes resources.

    "},{"location":"architecture/docs/design/architectural-overview/#key-architectural-areas","title":"Key Architectural Areas","text":"
    • The Kuadrant architecture is spread across a control plane and also a data plane. Kuadrant can work in both a single and multi-cluster context. Currently in order for all APIs to work in a single or multi-cluster context you need to have Open Cluster Management installed. While this may change in the future, this approach allows us to start with a single cluster and seamlessly scale as more clusters are added.
    • The control plane is where policies are exposed and expressed as kubernetes APIs and reconciled by the Kuadrant policy controllers.
    • The data plane is where Kuadrant's service protection components, configured by the control plane policies, are enforced within the gateway instance as part of the request flow.
    "},{"location":"architecture/docs/design/architectural-overview/#1000m-architecture","title":"1000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"

    A control plane component is something responsible for accepting instruction via a CRD based API and ensuring that configuration is manifested into state that can be acted on.

    "},{"location":"architecture/docs/design/architectural-overview/#kuadrant-operator","title":"Kuadrant Operator","text":"
    • Installation of data plane service protection components via their respective operators
    • Exposes RateLimitPolicy and AuthPolicy and is currently the policy controller for these APIs
    • Configures the Gateway to be able to leverage the data plane service protection components
    "},{"location":"architecture/docs/design/architectural-overview/#multi-cluster-gateway-controller","title":"Multi-Cluster Gateway Controller","text":"
    • Exposes DNSPolicy and TLSPolicy
    • Configures DNS providers (e.g AWS Route 53) and TLS providers
    • Focused around use cases involving distributed gateways (for example across clouds or geographic regions)
    • Integrates with Open Cluster Management as the multi-cluster management hub to distribute and observe gateway status based on the clusters they are deployed to. Works directly with Open Cluster Management APIs such as PlacementDecision and ManifestWork.
    "},{"location":"architecture/docs/design/architectural-overview/#kuadrant-add-on-manager","title":"Kuadrant-add-on-manager","text":"
    • Sub component in the gateway controller repository
    • Follows the add-on pattern from Open Cluster Management
    • Responsible for configuring and installing Kuadrant into a target spoke cluster
    "},{"location":"architecture/docs/design/architectural-overview/#limitador-operator","title":"Limitador Operator:","text":"
    • Installs and configures Limitador
    "},{"location":"architecture/docs/design/architectural-overview/#authorino-operator","title":"Authorino Operator:","text":"
    • Installs and configures Authorino
    "},{"location":"architecture/docs/design/architectural-overview/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"

    A data plane component sits in the request flow and is responsible for enforcing policy and providing service protection capabilities based on configuration managed and created by the control plane.

    "},{"location":"architecture/docs/design/architectural-overview/#limitador","title":"Limitador","text":"
    • Complies with the Envoy rate limiting API to provide rate limiting to the gateway
    "},{"location":"architecture/docs/design/architectural-overview/#authorino","title":"Authorino","text":"
    • Complies with the Envoy external auth API to provide auth integration to the gateway
    "},{"location":"architecture/docs/design/architectural-overview/#wasm-shim","title":"WASM Shim","text":"
    • Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador for request-time enforcement of rate limiting
    "},{"location":"architecture/docs/design/architectural-overview/#dependencies-and-integrations","title":"Dependencies and integrations","text":"

    In order to provide its full suite of functionality, Kuadrant has several dependencies. Some of these are optional depending on the functionality needed.

    "},{"location":"architecture/docs/design/architectural-overview/#cert-manager-required","title":"Cert-Manager: Required","text":"
    • Provides TLS integration
    • Used by TLSPolicy and Authorino.
    "},{"location":"architecture/docs/design/architectural-overview/#open-cluster-manager-required","title":"Open Cluster Manager: Required","text":"
    • Provides a multi-cluster control plane to enable the defining and distributing of Gateways across multiple clusters.
    "},{"location":"architecture/docs/design/architectural-overview/#istio-required","title":"Istio: Required","text":"
    • Gateway API provider that Kuadrant integrates with via WASM and Istio APIS to provide service protection capabilities.
    • Used by RateLimitPolicy and AuthPolicy
    "},{"location":"architecture/docs/design/architectural-overview/#gateway-api-required","title":"Gateway API: Required","text":"
    • New standard for Ingress from the Kubernetes community
    • Gateway API is the core API that Kuadrant integrates with.
    "},{"location":"architecture/docs/design/architectural-overview/#thanosprometheusgrafana-optional","title":"Thanos/Prometheus/Grafana: Optional","text":"
    • Provides observability integration
    • Rather than providing any Kuadrant specific observability tooling, we instead look to leverage existing tools and technologies to provide observability capabilities for ingress.
    "},{"location":"architecture/docs/design/architectural-overview/#high-level-multi-cluster-architecture","title":"High Level Multi-Cluster Architecture","text":"

    Kuadrant has a multi-cluster gateway controller that is intended to run in an Open Cluster Management provided \"Hub\" cluster. This cluster is effectively a central management cluster where policy and gateways along with all that Open Cluster Management offers can be defined and distributed to the managed \"spoke\" clusters.

    "},{"location":"architecture/docs/design/architectural-overview/#single-cluster","title":"Single cluster","text":"

    In a single cluster context, the overall architecture remains the same as above, the key difference is that the Hub and Spoke cluster are now a single cluster rather than multiple clusters. This is how we are initially supporting single cluster.

    "},{"location":"architecture/docs/design/architectural-overview/#how-does-kuadrant-leverage-open-cluster-management","title":"How does Kuadrant leverage Open Cluster Management?","text":"

    Kuadrant deploys a multi-cluster gateway controller into the Open Cluster Management hub (a control plane that manages a set of \"spoke\" clusters where workloads are executed). This controller offers its own APIs but also integrates with hub CRD based APIs (such as the placement API) along with the Gateway API CRD based APIs in order to provide multi-cluster Gateway capabilities to the hub and distribute actual gateway instances to the spokes. See the Open Cluster Management docs for further details on the hub spoke architecture.

    As part of installing Kuadrant, the Gateway API CRDs are also installed into the hub cluster and Kuadrant defines a standard Gateway API GatewayClass resource that the multi-cluster gateway controller is the chosen controller for.

    Once installed, an Open Cluster Management user can then (with the correct RBAC in place) define in the standard way a Gateway resource that inherits from the Kuadrant configured GatewayClass in the hub. There is nothing unique about this Gateway definition, the difference is what it represents and how it is used. This Gateway is used to represent a \"multi-cluster\" distributed gateway. As such there are no pods running behind this Gateway instance in the hub cluster, instead it serves as a template that the Kuadrant multi-cluster gateway controller reconciles and distributes to targeted spoke clusters. It leverages the Open Cluster Management APIs to distribute these gateways (more info below) and aggregates the status information from each spoke cluster instance of this gateway back to this central definition, in doing this it can represent the status of the gateway across multiple clusters but also use that information to integrate with DNS providers etc.

    "},{"location":"architecture/docs/design/architectural-overview/#gateway-deployment-and-distribution","title":"Gateway Deployment and Distribution","text":"

    In order for a multi-cluster gateway to be truly useful, it needs to be distributed or \"placed\" on a specific set of hub managed spoke clusters. Open Cluster Management is responsible for a set of placement and replication APIs. Kuadrant is aware of these APIs, and so when a given gateway is chosen to be placed on a set of managed clusters, Kuadrant multi-cluster gateway controller will ensure the right resources (ManifestWork) are created in the correct namespaces in the hub. Open Cluster Management then is responsible for syncing these to the actual spoke cluster and reporting back the status of these resources to the Hub. A user would indicate which clusters they want a gateway placed on by using a Placement and then labeling the gateway using the cluster.open-cluster-management.io/placement label.

    In order for the Gateway to be instantiated, we need to know what underlying gateway provider is being used on the spoke clusters. Admins can then set this provider in the hub via the GatewayClass params. In the hub, Kuadrant will then apply a transformation to the gateway to ensure when synced it references this spoke gateway provider (Istio for example).

    It is the Open Cluster Management workagent that is responsible for syncing down and applying the resources into the managed spoke cluster. It is also responsible for syncing status information back to the hub. It is the multi-cluster gateway controller that is responsible for aggregating this status.

    The status information reported back to the Hub is used by the multi-cluster gateway controller to know what LB hosts / IPAddresses to use for DNSRecords that it creates and manages.

    More info on the Open Cluster Management hub and spoke architecture can be found here

    "},{"location":"architecture/docs/design/architectural-overview/#how-does-kuadrant-integrate-with-gateway-providers","title":"How does Kuadrant integrate with Gateway Providers?","text":"

    Currently the Kuadrant data plane only integrates with an Istio based gateway provider:

    • It registers Authorino with the IstioOperator as an auth provider so that Authorino can be used as an external auth provider.
    • It leverages an EnvoyFilter to register the rate limiting service as an upstream service.
    • Based on the Kuadrant AuthPolicy, it leverages Istio's AuthorizationPolicy resource to configure when a request should trigger Authorino to be called for a given host, path and method etc.
    • It provides a WebAssembly (WASM) Plugin that conforms to the Proxy WASM ABI (application binary interface). This WASM Plugin is loaded into the underlying Envoy based gateway provider and configured via the Kuadrant Operator based on defined RateLimitPolicy resources. This binary is executed in response to an HTTP request being accepted by the gateway via the underlying Envoy instance that provides the proxy layer for the Gateway (IE Envoy). This plugin is configured with the correct upstream rate limit service name and when it sees a request, based on the provided configuration, it will trigger a call to the installed Limitador that is providing the rate limit capabilities and either allow the request to continue or trigger a response to the client with a 429 (too many requests) HTTP code.
    "},{"location":"architecture/docs/design/architectural-overview/#data-flows","title":"Data Flows","text":"

    There are several different data flows when using Kuadrant.

    "},{"location":"architecture/docs/design/architectural-overview/#control-plane-configuration-and-status-reporting","title":"Control plane configuration and status reporting","text":"

    The initial creation of these APIs (gateways, policies etc) is done by the relevant persona in the control plane just as they would any other k8s resource. We use the term cluster admin or gateway admin as the operations type persona configuring, and placing gateways. As shown above, in a multi-cluster configuration, API definitions are pulled from the Hub and \"manifested\" into the spokes. The Status of those synced resources are reported back to the Hub. The same happens for a single cluster, the only difference being the work agent hub controllers are all installed on one cluster.

    "},{"location":"architecture/docs/design/architectural-overview/#third-party-enforcement-and-integration","title":"Third party enforcement and Integration","text":"

    In order to enforce the policy configuration, components in the control plane and data plane can reach out to configured 3rd parties such as cloud based DNS provider, TLS providers and Auth providers.

    "},{"location":"architecture/docs/design/architectural-overview/#request-flow","title":"Request Flow","text":"

    Requests coming through the gateway instance can be sent to Limitador based on configuration of the WASM plugin installed into the Envoy based gateway provider or to Authorino based on configuration provided by the Istio AuthorizationPolicy. Each of these components have the capability to see the request and need to in order to make the required decision. Each of these components can also prevent the request from reaching its intended backend destination based on user configuration.

    "},{"location":"architecture/docs/design/architectural-overview/#auth","title":"Auth","text":"

    As all of the APIs are CRDs, auth around creating these resources is handled in the standard way IE by the kubernetes cluster and RBAC. There is no relationship by default between the Auth features provided by Authorino to application developers and the auth requirements of the cluster API server.

    For Auth between Spoke and Hub see Open Cluster Management docs

    "},{"location":"architecture/docs/design/architectural-overview/#observability","title":"Observability","text":"

    Kuadrant doesn't provide any specific observability components, but rather provides a reference setup using well known and established components along with some useful dashboards to help observe key things around the Gateways. The focus of this setup, is in the context of a multi-cluster setup where Open Cluster Management is installed and gateways are being defined and distributed from that hub.

    "},{"location":"architecture/docs/design/architectural-overview/#some-notes-on-future-direction","title":"Some notes on future direction","text":"

    This section is here to provide some insight into architectural changes that may be seen in the near future:

    What is in this doc represents the architecture at the point of our MVP release. Below are some areas that we have identified that are likely to change in the coming releases. As these happen, this doc will also evolve.

    • We want to separate out the ocm integration into its own controller so that policies can evolve without a coupling to any one multi-cluster management solution
    • We want to separate the policies into their own controller that is capable of supporting both single (without Open Cluster Management) and multi-cluster (with Open Cluster Management enabled) contexts, so that the barrier to entry is reduced for those starting with a single cluster
    • We want to allow for an on cluster DNS Provider such as CoreDNS so that we can provide an implementation that is disconnected from any cloud provider and provides more flexible DNS setups.
    • We will look to reduce our integration with Istio and want to provide integration with additional gateway providers such as EnvoyGateway
    "},{"location":"architecture/docs/design/modular_installation/","title":"Kuadrant Proposal - Modular Installation","text":"

    Kuadrant is developing a set of loosely coupled functionalities built directly on top of Kubernetes. Kuadrant aims to allow customers to just install, use and understand those functionalities they need.

    "},{"location":"architecture/docs/design/modular_installation/#problem-statement","title":"Problem Statement","text":"

    Currently, the installation tool of kuadrant, the kuadrantctl CLI, installs all or nothing. Installing more than the customer needs adds unneeded complexity and operational effort. For example, if a customer is looking for rate limiting and not interested in authentication functionality, then the customer should be able to just install and run that part of Kuadrant.

    "},{"location":"architecture/docs/design/modular_installation/#high-level-goals","title":"High Level Goals","text":"
    • Install only required components. Operate only required components.

    Reduce system complexity and operational effort to the minimum required. Components in this context make reference to deployments and running instances.

    • Expose only the activated functionalities

    A user of a partial Kuadrant install should not be confronted with data in custom resources that has no meaning or is not accessible in their partial Kuadrant install. The design of the kuadrant API should take this goal into account.

    "},{"location":"architecture/docs/design/modular_installation/#proposed-solution","title":"Proposed Solution","text":"

    The kuadrant installation mechanism should offer modular installation to enable/disable loosely coupled pieces of kuadrant. Modular installation options should be feature oriented rather than deployment component oriented. Then, it is up to the installation tool to decide what components need to be deployed and how to configure it.

    Each feature, or part of it, is eligible to be included or excluded when installing kuadrant.

    Some profiles can be defined to group a set of commonly required features. Naming the profiles allows the customer to easily express the wanted installation configuration. Furthermore, profiles can not only be used to group a set of features, they can also be used to define deployment options.

    Name Description Minimal Minimal installation required to run an API without any protection, analytics or API management. Default deployment option AuthZ Authentication and authorization mechanisms activated RateLimit Basic rate limit (only pre-auth rate limit) features Full Full featured kuadrant installation

    A kuadrant operator, together with a design of a kuadrant CRD is desired. Not only for kuadrant installation, but also for lifecycle management. Additionally, the kuadrantctl CLI tool can also be useful to either deploy kuadrant components and manifests or just deploy the kuadrant operator.

    The kuadrant control plane should be aware of the installed profile via env vars or command line params in the control plane running components. With that information, the control plane can decide to enable or disable CRD watching, label and annotation monitoring and ultimately reject any configuration object that relies on disabled functionality. The least a customer can expect from kuadrant is to be consistent and reject any functionality request that it cannot provide.

    "},{"location":"kuadrantctl/","title":"kuadrantctl","text":"

    kuadrantctl is a CLI tool for managing Kuadrant configurations and resources.

    "},{"location":"kuadrantctl/#installing","title":"Installing","text":"

    kuadrantctl can be installed either by downloading pre-compiled binaries or by compiling from source. For most users, downloading the binary is the easiest and recommended method.

    "},{"location":"kuadrantctl/#installing-pre-compiled-binaries","title":"Installing Pre-compiled Binaries","text":"
    1. Download the latest binary for your platform from the kuadrantctl Releases page.
    2. Unpack the binary.
    3. Move it to a directory in your $PATH so that it can be executed from anywhere.
    "},{"location":"kuadrantctl/#compiling-from-source","title":"Compiling from Source","text":"

    If you prefer to compile from source or are contributing to the project, you can install kuadrantctl using make install. This method requires Golang 1.21 or newer.

    It is possible to use the make target install to compile from source. From root of the repository, run

    make install\n

    This will compile kuadrantctl and install it in the bin directory at the root of the repository. It will also ensure the correct version of the binary is displayed. It can be run using ./bin/kuadrantctl .

    "},{"location":"kuadrantctl/#usage","title":"Usage","text":"

    Below is a high-level overview of its commands, along with links to detailed documentation for more complex commands.

    "},{"location":"kuadrantctl/#general-syntax","title":"General Syntax","text":"
    kuadrantctl [command] [subcommand] [flags]\n
    "},{"location":"kuadrantctl/#commands-overview","title":"Commands Overview","text":"Command Description completion Generate autocompletion scripts for the specified shell generate Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications topology Command related to Kuadrant topology help Help about any command version Print the version number of kuadrantctl"},{"location":"kuadrantctl/#flags","title":"Flags","text":"Flag Description -h, --help Help for kuadrantctl -v, --verbose Enable verbose output"},{"location":"kuadrantctl/#commands-detail","title":"Commands Detail","text":""},{"location":"kuadrantctl/#completion","title":"completion","text":"

    Generate an autocompletion script for the specified shell.

    Subcommand Description bash Generate script for Bash fish Generate script for Fish powershell Generate script for PowerShell zsh Generate script for Zsh"},{"location":"kuadrantctl/#generate","title":"generate","text":"

    Commands related to Kubernetes Gateway API and Kuadrant resource generation from OpenAPI 3.x specifications.

    Subcommand Description gatewayapi Generate Gateway API resources kuadrant Generate Kuadrant resources"},{"location":"kuadrantctl/#generate-gatewayapi","title":"generate gatewayapi","text":"

    Generate Gateway API resources from an OpenAPI 3.x specification

    Subcommand Description Flags httproute Generate Gateway API HTTPRoute from OpenAPI 3.0.X --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o Output format: 'yaml' or 'json'. (default \"yaml\")"},{"location":"kuadrantctl/#topology","title":"topology","text":"

    Export and visualize kuadrant topology

    "},{"location":"kuadrantctl/#usage_1","title":"Usage","text":"
    $ kuadrantctl topology -h\nExport and visualize kuadrant topology\n\nUsage:\n  kuadrantctl topology [flags]\n\nFlags:\n  -d, --dot string         Graphviz DOT output file\n  -h, --help               help for topology\n  -n, --namespace string   Topology's namespace (default \"kuadrant-system\")\n  -o, --output string      SVG image output file\n\nGlobal Flags:\n  -v, --verbose   verbose output\n
    "},{"location":"kuadrantctl/#generate-kuadrant","title":"generate kuadrant","text":"

    Generate Kuadrant resources from an OpenAPI 3.x specification

    Subcommand Description Flags authpolicy Generate a Kuadrant AuthPolicy from an OpenAPI 3.0.x specification --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o Output format: 'yaml' or 'json'. (default \"yaml\") ratelimitpolicy Generate Kuadrant RateLimitPolicy from an OpenAPI 3.0.x specification --oas string Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required). -o Output format: 'yaml' or 'json'. (default \"yaml\")"},{"location":"kuadrantctl/#version","title":"version","text":"

    Print the version number of kuadrantctl.

    No additional flags or subcommands.

    "},{"location":"kuadrantctl/#additional-guides","title":"Additional Guides","text":""},{"location":"kuadrantctl/#generating-gateway-api-httproute-objects","title":"Generating Gateway API HTTPRoute Objects","text":"
    • Generates Gateway API HTTPRoute objects from an OpenAPI Specification (OAS) 3.x.
    • Supports reading from a file, URL, or stdin.
    • Example usages and more information can be found in the detailed guide.
    "},{"location":"kuadrantctl/#generating-kuadrant-authpolicy-objects","title":"Generating Kuadrant AuthPolicy Objects","text":"
    • Generates Kuadrant AuthPolicy objects for managing API authentication.
    • Supports openIdConnect and apiKey types from the OpenAPI Security Scheme Object.
    • Example usages and more information can be found in the detailed guide.
    "},{"location":"kuadrantctl/#generating-kuadrant-ratelimitpolicy-objects","title":"Generating Kuadrant RateLimitPolicy Objects","text":"
    • Generates Kuadrant RateLimitPolicy objects for managing API rate limiting.
    • Supports reading from a file, URL, or stdin.
    • Example usages and more information can be found in the detailed guide.

    For more detailed information about each command, including options and usage examples, use kuadrantctl [command] --help.

    "},{"location":"kuadrantctl/#using-with-github-actions","title":"Using with GitHub Actions","text":"
    - name: Install kuadrantctl\n  uses: jaxxstorm/action-install-gh-release@v1.10.0\n  with: # Grab the latest version\n    repo: Kuadrant/kuadrantctl\n
    "},{"location":"kuadrantctl/#commands","title":"Commands","text":"
    • Generate Gateway API HTTPRoute objects from OpenAPI 3.X
    • Generate Kuadrant RateLimitPolicy from OpenAPI 3.X
    • Generate Kuadrant AuthPolicy from OpenAPI 3.X
    "},{"location":"kuadrantctl/#contributing","title":"Contributing","text":"

    The Development guide describes how to build the kuadrantctl CLI and how to test your changes before submitting a patch or opening a PR.

    "},{"location":"kuadrantctl/#licensing","title":"Licensing","text":"

    This software is licensed under the Apache 2.0 license.

    See the LICENSE and NOTICE files that should have been provided along with this software for details.

    "},{"location":"kuadrantctl/doc/RELEASE/","title":"RELEASE","text":""},{"location":"kuadrantctl/doc/RELEASE/#release","title":"Release","text":"

    The release process follows a streamlined approach, no release branches involved. New releases can be major, minor or patch based releases, but always incrementing digits regarding the latest release version.

    "},{"location":"kuadrantctl/doc/RELEASE/#new-majorminorpatch-version","title":"New Major.Minor.Patch version","text":"
    1. Create a new minor release branch from the HEAD of main:
      git checkout -b release-vX.Y.Z\n
    2. Update version (prefixed with \"v\"):
      make prepare-release VERSION=vX.Y.Z\n
    3. Verify local changes:
      make install\nbin/kuadrantctl version\n
      The output should be the new version, for example :
      kuadrantctl v0.3.0 (ff779a1-dirty)\n
    4. Commit and push:
      git add .\ngit commit -m \"prepare-release: release-vX.Y.Z\"\ngit push origin release-vX.Y.Z\n
    5. Create git tag:
      git tag -s -m vX.Y.Z vX.Y.Z\ngit push origin vX.Y.Z\n
    6. In Github, create release.

    7. Pick recently pushed git tag

    8. Automatically generate release notes from previous released tag
    9. Set as the latest release

    10. Verify that the build Release workflow is triggered and completes for the new tag

    "},{"location":"kuadrantctl/doc/RELEASE/#verify-new-release-is-available","title":"Verify new release is available","text":"
    1. Download the latest binary for your platform from the kuadrantctl Latest Releases page.
    2. Unpack the binary.
    3. Move it to a directory in your $PATH so that it can be executed from anywhere.
    4. Check the version:
      kuadrantctl version\n
      The output should be the new version, for example :
      kuadrantctl v0.3.0 (eec318b2e11e7ea5add5e550ff872bde64555d8f)\n
    "},{"location":"kuadrantctl/doc/development/","title":"Development Guide","text":""},{"location":"kuadrantctl/doc/development/#technology-stack-required-for-development","title":"Technology stack required for development","text":"
    • git
    • go version 1.21+
    "},{"location":"kuadrantctl/doc/development/#build-the-cli","title":"Build the CLI","text":"
    $ git clone https://github.com/kuadrant/kuadrantctl.git\n$ cd kuadrantctl && make install\n$ bin/kuadrantctl version\n{\"level\":\"info\",\"ts\":\"2023-11-08T23:44:57+01:00\",\"msg\":\"kuadrantctl version: latest\"}\n
    "},{"location":"kuadrantctl/doc/development/#quick-steps-to-contribute","title":"Quick steps to contribute","text":"
    • Fork the project.
    • Download your fork to your PC (git clone https://github.com/your_username/kuadrantctl && cd kuadrantctl)
    • Create your feature branch (git checkout -b my-new-feature)
    • Make changes and run tests (make test)
    • Add them to staging (git add .)
    • Commit your changes (git commit -m 'Add some feature')
    • Push to the branch (git push origin my-new-feature)
    • Create new pull request
    "},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/","title":"Generate gateway api httproute","text":""},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#generate-gateway-api-httproute-object-from-openapi-3","title":"Generate Gateway API HTTPRoute object from OpenAPI 3","text":"

    The kuadrantctl generate gatewayapi httproute command generates a Gateway API HTTPRoute from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.

    "},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#openapi-specification","title":"OpenAPI specification","text":"

    An OpenAPI document resource can be provided to the cli by one of the following channels:

    • Filename in the available path.
    • URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
    • Read from stdin standard input stream.
    "},{"location":"kuadrantctl/doc/generate-gateway-api-httproute/#usage","title":"Usage","text":"
    $ kuadrantctl generate gatewayapi httproute -h\nGenerate Gateway API HTTPRoute from OpenAPI 3.0.X\n\nUsage:\n  kuadrantctl generate gatewayapi httproute [flags]\n\nFlags:\n  -h, --help          help for httproute\n  --oas string        Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n  -o Output format:   'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n  -v, --verbose   verbose output\n

    Under the example folder there are examples of OAS 3 that can be used to generate the resources

    As an AuthPolicy and RateLimitPolicy both require a HTTPRoute to target, the user guides for generating those policies include examples of running the kuadrantctl generate gatewayapi httproute command.

    You can find those guides here:

    • Generate Kuadrant AuthPolicy
    • Generate Kuadrant RateLimitPolicy
    "},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/","title":"Generate kuadrant auth policy","text":""},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#generate-kuadrant-authpolicy-object-from-openapi-3","title":"Generate Kuadrant AuthPolicy object from OpenAPI 3","text":"

    The kuadrantctl generate kuadrant authpolicy command generates a Kuadrant AuthPolicy from your OpenAPI Specification (OAS) 3.x powered with kuadrant extensions.

    "},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#openapi-specification","title":"OpenAPI specification","text":"

    An OpenAPI document resource can be provided to the cli by one of the following channels:

    • Filename in the available path.
    • URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
    • Read from the standard input stream (stdin).

    OpenAPI Security Scheme Object types

    Types Implemented openIdConnect YES apiKey YES http NO oauth2 NO"},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#openidconnect-type-description","title":"openIdConnect Type Description","text":"

    The following OAS example has one protected endpoint GET /dog with openIdConnect security scheme type.

    paths:\n  /dog:\n    get:\n      operationId: \"getDog\"\n      security:\n\n        - securedDog: []\n      responses:\n        405:\n          description: \"invalid input\"\ncomponents:\n  securitySchemes:\n    securedDog:\n      type: openIdConnect\n      openIdConnectUrl: https://example.com/.well-known/openid-configuration\n

    Running the command

    kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml  | yq -P\n

    The generated authpolicy (only relevant fields shown here):

    kind: AuthPolicy\napiVersion: kuadrant.io/v1beta2\nmetadata:\n  name: petstore\n  namespace: petstore\n  creationTimestamp: null\nspec:\n  routeSelectors:\n\n    - matches:\n        - path:\n            type: Exact\n            value: /api/v1/dog\n          method: GET\n  rules:\n    authentication:\n      getDog_securedDog:\n        credentials: {}\n        jwt:\n          issuerUrl: https://example.com/.well-known/openid-configuration\n        routeSelectors:\n          - matches:\n              - path:\n                  type: Exact\n                  value: /api/v1/dog\n                method: GET\n
    "},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#apikey-type-description","title":"apiKey Type Description","text":"

    The following OAS example has one protected endpoint GET /dog with apiKey security scheme type.

    paths:\n  /dog:\n    get:\n      operationId: \"getDog\"\n      security:\n\n        - securedDog: []\n      responses:\n        405:\n          description: \"invalid input\"\ncomponents:\n  securitySchemes:\n    securedDog:\n      type: apiKey\n      name: dog_token\n      in: query\n

    Running the command

    kuadrantctl generate kuadrant authpolicy --oas ./petstore-openapi.yaml  | yq -P\n

    The generated authpolicy (only relevant fields shown here):

    kind: AuthPolicy\napiVersion: kuadrant.io/v1beta2\nmetadata:\n  name: petstore\n  namespace: petstore\n  creationTimestamp: null\nspec:\n  routeSelectors:\n\n    - matches:\n        - path:\n            type: Exact\n            value: /dog\n          method: GET\n  rules:\n    authentication:\n      getDog_securedDog:\n        credentials:\n          queryString:\n            name: dog_token\n          apiKey:\n            selector:\n              matchLabels:\n                kuadrant.io/apikeys-by: securedDog\n        routeSelectors:\n          - matches:\n              - path:\n                  type: Exact\n                  value: /dog\n                method: GET\n

    In this particular example, the endpoint GET /dog will be protected. The token needs to be in the query string of the request included in a parameter named dog_token. Kuadrant will validate received tokens against tokens found in kubernetes secrets with label kuadrant.io/apikeys-by: ${sec scheme name}. In this particular example the label selector will be: kuadrant.io/apikeys-by: securedDog.

    Like the following example:

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    kuadrant.io/apikeys-by: securedDog\nstringData:\n  api_key: MYSECRETTOKENVALUE\ntype: Opaque\n

    Note: Kuadrant validates tokens against api keys found in secrets. The label selector format kuadrant.io/apikeys-by: ${sec scheme name} is arbitrary and designed for this CLI command.

    For more information about Kuadrant auth based on api key: https://docs.kuadrant.io/latest/authorino/docs/user-guides/api-key-authentication/

    "},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#usage","title":"Usage","text":"
    Generate Kuadrant AuthPolicy from OpenAPI 3.0.X\n\nUsage:\n  kuadrantctl generate kuadrant authpolicy [flags]\n\nFlags:\n  -h, --help         help for authpolicy\n  --oas string        Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n  -o Output format:   'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n  -v, --verbose   verbose output\n

    Under the example folder there are examples of OAS 3 that can be used to generate the resources

    "},{"location":"kuadrantctl/doc/generate-kuadrant-auth-policy/#user-guide","title":"User Guide","text":"

    The verification steps will lead you to the process of deploying and testing the following api with endpoints protected using different security schemes:

    Operation Security Scheme GET /api/v1/cat public (not auth) POST /api/v1/cat ApiKey in header GET /api/v1/dog OpenIdConnect GET /api/v1/snake OpenIdConnect OR ApiKey in query string
    • [Optional] Setup SSO service supporting OIDC. For this example, we will be using keycloak.
    • Create a new realm petstore
    • Create a client petstore. In the Client Protocol field, select openid-connect.
    • Configure client settings. Access Type to public. Direct Access Grants Enabled to ON (for this example password will be used directly to generate the token).
    • Add a user to the realm
      • Click the Users menu on the left side of the window. Click Add user.
      • Type the username bob, set the Email Verified switch to ON, and click Save.
      • On the Credentials tab, set the password p. Enter the password in both the fields, set the Temporary switch to OFF to avoid the password reset at the next login, and click Set Password.

    Now, let's run a local cluster to test the kuadrantctl new command to generate authpolicy.

    • Clone the repo
    git clone https://github.com/Kuadrant/kuadrantctl.git\ncd kuadrantctl\n
    • Setup a cluster, Istio and Gateway API CRDs and Kuadrant

    Use our single-cluster quick start script - this will install Kuadrant in a local kind cluster: https://docs.kuadrant.io/latest/getting-started-single-cluster/

    • Build and install CLI in bin/kuadrantctl path
    make install\n
    • Deploy petstore backend API
    kubectl create namespace petstore\nkubectl apply -n petstore -f examples/petstore/petstore.yaml\n
    • Let's create Petstore's OpenAPI spec
    cat <<EOF >petstore-openapi.yaml\n---\nopenapi: \"3.1.0\"\ninfo:\n  title: \"Pet Store API\"\n  version: \"1.0.0\"\nx-kuadrant:\n  route:\n    name: \"petstore\"\n    namespace: \"petstore\"\n    hostnames:\n\n      - example.com\n    parentRefs:\n      - name: istio-ingressgateway\n        namespace: istio-system\nservers:\n  - url: https://example.io/api/v1\npaths:\n  /cat:\n    x-kuadrant:\n      backendRefs:\n        - name: petstore\n          port: 80\n          namespace: petstore\n    get:  # No sec requirements\n      operationId: \"getCat\"\n      responses:\n        405:\n          description: \"invalid input\"\n    post:  # API key\n      operationId: \"postCat\"\n      security:\n        - cat_api_key: []\n      responses:\n        405:\n          description: \"invalid input\"\n  /dog:\n    x-kuadrant:\n      backendRefs:\n        - name: petstore\n          port: 80\n          namespace: petstore\n    get:  # OIDC\n      operationId: \"getDog\"\n      security:\n        - oidc:\n          - read:dogs\n      responses:\n        405:\n          description: \"invalid input\"\n  /snake:\n    x-kuadrant:\n      backendRefs:\n        - name: petstore\n          port: 80\n          namespace: petstore\n    get:  # OIDC or API key\n      operationId: \"getSnake\"\n      security:\n        - oidc: [\"read:snakes\"]\n        - snakes_api_key: []\n      responses:\n        405:\n          description: \"invalid input\"\ncomponents:\n  securitySchemes:\n    cat_api_key:\n      type: apiKey\n      name: api_key\n      in: header\n    oidc:\n      type: openIdConnect\n      openIdConnectUrl: https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore\n    snakes_api_key:\n      type: apiKey\n      name: snake_token\n      in: query\nEOF\n

    Replace ${KEYCLOAK_PUBLIC_DOMAIN} with your SSO instance domain

    • Create an API key only valid for POST /api/v1/cat endpoint

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: cat-api-key-1\n  namespace: petstore\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    kuadrant.io/apikeys-by: cat_api_key\nstringData:\n  api_key: I_LIKE_CATS\ntype: Opaque\nEOF\n

      Note: the label's value of kuadrant.io/apikeys-by: cat_api_key is the name of the sec scheme of the OpenAPI spec.

    • Create an API key only valid for GET /api/v1/snake endpoint

    kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: snake-api-key-1\n  namespace: petstore\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    kuadrant.io/apikeys-by: snakes_api_key\nstringData:\n  api_key: I_LIKE_SNAKES\ntype: Opaque\nEOF\n

    Note: the label's value of kuadrant.io/apikeys-by: snakes_api_key is the name of the sec scheme of the OpenAPI spec.

    • Create the HTTPRoute using the CLI
    bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n
    • Create Kuadrant's Auth Policy
    bin/kuadrantctl generate kuadrant authpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n

    Now, we are ready to test OpenAPI endpoints

    • GET /api/v1/cat -> It's a public endpoint, hence should return 200 Ok
    curl  -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/cat\"\n
    • POST /api/v1/cat -> It's a protected endpoint with apikey

    Without any credentials, it should return 401 Unauthorized

    curl  -H \"Host: example.com\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
    HTTP/1.1 401 Unauthorized\nwww-authenticate: Bearer realm=\"getDog_oidc\"\nwww-authenticate: Bearer realm=\"getSnake_oidc\"\nwww-authenticate: snake_token realm=\"getSnake_snakes_api_key\"\nwww-authenticate: api_key realm=\"postCat_cat_api_key\"\nx-ext-auth-reason: {\"postCat_cat_api_key\":\"credential not found\"}\ndate: Tue, 28 Nov 2023 22:28:44 GMT\nserver: istio-envoy\ncontent-length: 0\n

    The reason headers indicate that the credential was not found. Credentials satisfying the postCat_cat_api_key authentication are needed.

    According to the OpenAPI spec, it should be a header named api_key. What if we try a wrong token — one assigned to another endpoint, i.e. I_LIKE_SNAKES instead of the valid one I_LIKE_CATS? It should return 401 Unauthorized.

    curl  -H \"Host: example.com\" -H \"api_key: I_LIKE_SNAKES\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
    HTTP/1.1 401 Unauthorized\nwww-authenticate: Bearer realm=\"getDog_oidc\"\nwww-authenticate: Bearer realm=\"getSnake_oidc\"\nwww-authenticate: snake_token realm=\"getSnake_snakes_api_key\"\nwww-authenticate: api_key realm=\"postCat_cat_api_key\"\nx-ext-auth-reason: {\"postCat_cat_api_key\":\"the API Key provided is invalid\"}\ndate: Tue, 28 Nov 2023 22:32:55 GMT\nserver: istio-envoy\ncontent-length: 0\n

    The reason headers tell that the API Key provided is invalid. Using valid token (from the secret cat-api-key-1 assigned to POST /api/v1/cats) in the api_key header should return 200 Ok

    curl  -H \"Host: example.com\" -H \"api_key: I_LIKE_CATS\" -X POST -i \"http://127.0.0.1:9080/api/v1/cat\"\n
    • GET /api/v1/dog -> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore realm)

    without credentials, it should return 401 Unauthorized

    curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/dog\"\n

    To get the authentication token, this example is using the Direct Access Grants OAuth2 grant type (also known as the Resource Owner Password Credentials grant type). When configuring the Keycloak (OIDC provider) client settings, we enabled Direct Access Grants to enable this procedure. We will be authenticating as the bob user with the p password. We previously created the bob user in Keycloak in the petstore realm.

    export ACCESS_TOKEN=$(curl -k -H \"Content-Type: application/x-www-form-urlencoded\" \\\n        -d 'grant_type=password' \\\n        -d 'client_id=petstore' \\\n        -d 'scope=openid' \\\n        -d 'username=bob' \\\n        -d 'password=p' \"https://${KEYCLOAK_PUBLIC_DOMAIN}/auth/realms/petstore/protocol/openid-connect/token\" | jq -r '.access_token')\n

    Replace ${KEYCLOAK_PUBLIC_DOMAIN} with your SSO instance domain

    With the access token in place, let's try to get those puppies

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/dog -i\n

    it should return 200 OK

    • GET /api/v1/snake -> It's a protected endpoint with oidc (assigned to our keycloak instance and petstore realm) OR with apiKey

    This example is to show that multiple security requirements (with OR semantics) can be specified for an OpenAPI operation.

    Without credentials, it should return 401 Unauthorized

    curl -H \"Host: example.com\" -i \"http://127.0.0.1:9080/api/v1/snake\"\n

    With the access token in place, it should return 200 OK (unless the token has expired).

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: example.com' http://127.0.0.1:9080/api/v1/snake -i\n

    With apiKey it should also work. According to the OpenAPI spec security scheme, it should be a query string named snake_token and the token needs to be valid token (from the secret snake-api-key-1 assigned to GET /api/v1/snake)

    curl -H 'Host: example.com' -i \"http://127.0.0.1:9080/api/v1/snake?snake_token=I_LIKE_SNAKES\"\n
    "},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/","title":"Generate kuadrant rate limit policy","text":""},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#generate-kuadrant-ratelimitpolicy-object-from-openapi-3","title":"Generate Kuadrant RateLimitPolicy object from OpenAPI 3","text":"

    The kuadrantctl generate kuadrant ratelimitpolicy command generates a Kuadrant RateLimitPolicy from your OpenAPI Specification (OAS) 3.x document powered with Kuadrant extensions.

    "},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#openapi-specification","title":"OpenAPI specification","text":"

    An OpenAPI document resource can be provided to the Kuadrant CLI in one of the following ways:

    • Filename in the available path.
    • URL format (supported schemes are HTTP and HTTPS). The CLI will try to download from the given address.
    • Read from the standard input stream (stdin).
    "},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#usage","title":"Usage","text":"
    Generate Kuadrant RateLimitPolicy from OpenAPI 3.0.x\n\nUsage:\n  kuadrantctl generate kuadrant ratelimitpolicy [flags]\n\nFlags:\n  -h, --help         help for ratelimitpolicy\n  --oas string        Path to OpenAPI spec file (in JSON or YAML format), URL, or '-' to read from standard input (required)\n  -o Output format:   'yaml' or 'json'. (default \"yaml\")\n\nGlobal Flags:\n  -v, --verbose   verbose output\n

    Note: The kuadrantctl/examples directory in GitHub includes sample OAS 3 files that you can use to generate the resources.

    "},{"location":"kuadrantctl/doc/generate-kuadrant-rate-limit-policy/#procedure","title":"Procedure","text":"
    1. Clone the Git repository as follows:

      git clone https://github.com/Kuadrant/kuadrantctl.git\ncd kuadrantctl\n ```\n2. Set up a cluster, Istio and Gateway API CRDs, and Kuadrant as follows: \n\n\n* Use the single-cluster quick start script to install Kuadrant in a local `kind` cluster: https://docs.kuadrant.io/latest/getting-started-single-cluster/.\n\n\n3. Build and install the CLI in `bin/kuadrantctl` path as follows:\n```bash\nmake install\n

    2. Deploy the Petstore backend API as follows:

      kubectl create namespace petstore\nkubectl apply -n petstore -f examples/petstore/petstore.yaml\n

    3. Create the Petstore OpenAPI definition as follows:

    cat <<EOF >petstore-openapi.yaml\n---\nopenapi: \"3.0.3\"\ninfo:\n  title: \"Pet Store API\"\n  version: \"1.0.0\"\nx-kuadrant:  ## Root-level Kuadrant extension\n  route:\n    name: \"petstore\"\n    namespace: \"petstore\"\n    hostnames:\n\n      - example.com\n    parentRefs:\n      - name: istio-ingressgateway\n        namespace: istio-system\nservers:\n  - url: https://example.io/v1\npaths:\n  /cat:\n    x-kuadrant:  ## Path-level Kuadrant extension\n      backendRefs:\n        - name: petstore\n          port: 80\n          namespace: petstore\n      rate_limit:\n        rates:\n          - limit: 1\n            duration: 10\n            unit: second\n        counters:\n          - request.headers.x-forwarded-for\n    get:  # Added to the route and rate limited\n      operationId: \"getCat\"\n      responses:\n        405:\n          description: \"invalid input\"\n    post:  # NOT added to the route\n      x-kuadrant: \n        disable: true\n      operationId: \"postCat\"\n      responses:\n        405:\n          description: \"invalid input\"\n  /dog:\n    get:  # Added to the route and rate limited\n      x-kuadrant:  ## Operation-level Kuadrant extension\n        backendRefs:\n          - name: petstore\n            port: 80\n            namespace: petstore\n        rate_limit:\n          rates:\n            - limit: 3\n              duration: 10\n              unit: second\n          counters:\n            - request.headers.x-forwarded-for\n      operationId: \"getDog\"\n      responses:\n        405:\n          description: \"invalid input\"\n    post:  # Added to the route and NOT rate limited\n      x-kuadrant:  ## Operation-level Kuadrant extension\n        backendRefs:\n          - name: petstore\n            port: 80\n            namespace: petstore\n      operationId: \"postDog\"\n      responses:\n        405:\n          description: \"invalid input\"\nEOF\n

    Note: The servers base path is not included. This is work-in-progress in follow-up PRs.

    Operation Applied configuration GET /cat Should return 200 OK and be rate limited (1 req / 10 seconds). POST /cat Not added to the HTTPRoute. Should return 404 Not Found. GET /dog Should return 200 OK and be rate limited (3 req / 10 seconds). POST /dog Should return 200 OK and NOT rate limited.
    1. Create the HTTPRoute by using the CLI as follows:

      bin/kuadrantctl generate gatewayapi httproute --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n

    2. Create the rate limit policy as follows:

      bin/kuadrantctl generate kuadrant ratelimitpolicy --oas petstore-openapi.yaml | kubectl apply -n petstore -f -\n

    3. Test the OpenAPI endpoints as follows:

    4. GET /cat - Should return 200 OK and be rate limited (1 req / 10 seconds).

      curl --resolve example.com:9080:127.0.0.1 -v \"http://example.com:9080/cat\"\n

    5. POST /cat - Not added to the HTTPRoute. Should return 404 Not Found.
      curl --resolve example.com:9080:127.0.0.1 -v -X POST \"http://example.com:9080/cat\"\n
    6. GET /dog - Should return 200 OK and be rate limited (3 req / 10 seconds).
    curl --resolve example.com:9080:127.0.0.1 -v \"http://example.com:9080/dog\"\n
    • POST /dog - Should return 200 OK and NOT rate limited.
    curl --resolve example.com:9080:127.0.0.1 -v -X POST \"http://example.com:9080/dog\"\n
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/","title":"kuadrantctl - CI/CD with Tekton and Argo CD","text":"

    This guide demonstrates setting up a CI/CD pipeline by using Tekton to deploy Kubernetes Gateway API and Kuadrant resources generated by kuadrantctl, from an OpenAPI definition. In this example, these resources are applied directly to the cluster where Tekton is running.

    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#prerequisites","title":"Prerequisites","text":"
    • Kuadrant, and all of its prerequisites, installed on a Kubernetes or OpenShift cluster.
    • Tekton Pipelines installed on your cluster.
    • kubectl configured with access to communicate with your cluster.
    • Optional: Tekton CLI tkn for easier interaction with Tekton resources.
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-1-set-up-your-namespace","title":"Step 1 - Set up your namespace","text":"

    Create a dedicated namespace as follows:

    kubectl create namespace petstore\n
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-2-create-a-persistent-volume-claim","title":"Step 2 - Create a Persistent Volume Claim","text":"

    For this example, to store associated Tekton build artifacts, create a Persistent Volume Claim (PVC) in the petstore namespace as follows:

    kubectl apply -n petstore -f - <<EOF\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n  name: tekton-kuadrantctl-pvc\n  namespace: petstore\nspec:\n  accessModes:\n\n    - ReadWriteOnce\n  resources:\n    requests:\n      storage: 1Gi\nEOF\n
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-3-define-the-tekton-task","title":"Step 3 - Define the Tekton Task","text":"

    Define the task that outlines steps to clone a repository, generate Kuadrant and Kubernetes resources by using kuadrantctl, and apply them directly to the cluster as follows:

    kubectl apply -f - <<'EOF'\napiVersion: tekton.dev/v1beta1\nkind: Task\nmetadata:\n  name: run-kuadrantctl\n  namespace: petstore\nspec:\n  params:\n\n    - name: gitRepoUrl\n      description: URL of the git repository to clone\n    - name: gitRevision\n      description: Git revision to checkout (branch, tag, sha)\n  workspaces:\n    - name: source\n      description: Workspace to checkout the git repo\n    - name: kubeconfig\n      description: Workspace containing kubeconfig for Kubernetes cluster access\n  steps:\n    - name: clean-workspace\n      image: alpine:latest\n      script: |\n        sh -c 'rm -rf $(workspaces.source.path)/* $(workspaces.source.path)/.[!.]* $(workspaces.source.path)/..?*'\n    - name: clone\n      image: alpine/git:latest\n      script: |\n        git clone $(params.gitRepoUrl) $(workspaces.source.path)\n        cd $(workspaces.source.path)\n        git checkout $(params.gitRevision)\n    - name: download-kuadrantctl\n      image: curlimages/curl:latest\n      script: |\n        ARCH=$(uname -m)\n        case $ARCH in\n        x86_64) BIN_ARCH=\"amd64\";;\n        arm64) BIN_ARCH=\"arm64\";;\n        aarch64) BIN_ARCH=\"arm64\";;\n        *) echo \"Unsupported architecture: $ARCH\" && exit 1 ;;\n        esac\n        cd $(workspaces.source.path)\n        curl -LO \"https://github.com/Kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz\"\n        tar -xzf kuadrantctl-v0.2.3-linux-$BIN_ARCH.tar.gz\n    - name: run-kuadrantctl\n      image: alpine:latest\n      script: |\n        cd $(workspaces.source.path)\n        mkdir -p generated-resources\n        ./kuadrantctl generate kuadrant authpolicy --oas openapi.yaml | tee generated-resources/authpolicy.yaml\n        ./kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml |  tee generated-resources/ratelimitpolicy.yaml\n        ./kuadrantctl generate gatewayapi httproute --oas openapi.yaml | tee generated-resources/httproute.yaml\n    - 
name: apply-resources\n      image: bitnami/kubectl\n      script: |\n        cd $(workspaces.source.path)\n        export KUADRANT_ZONE_ROOT_DOMAIN=example.com # domain name used in the HTTPRoute for the petstore sample app\n        for file in ./generated-resources/*.yaml; do\n          envsubst < \"$file\" | kubectl apply -n petstore -f - \n        done\nEOF\n

    Note: This example uses Tekton with kubectl to apply resources to a cluster. It is best to use a tool such as Argo CD to implement continuous delivery by using a GitOps approach. In this scenario, you would do the following:

    • Use kuadrantctl to generate Kubernetes and Kuadrant resources as part of a Tekton pipeline.
    • Commit these new resources to a Git repository.
    • Use ArgoCD to sync these changes from the Git repository to a Kubernetes or OpenShift cluster.
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-4-create-a-kubeconfig-secret","title":"Step 4 - Create a Kubeconfig secret","text":"

    Important: While this guide uses a kubeconfig secret for simplicity, do not use this in production environments. Instead, use a service account for enhanced security.

    This example uses a kubeconfig secret and role bindings to demonstrate how to provide access for pushing generated resources to a cluster. However, for production setups, employing a service account is best.

    To proceed, create a kubeconfig secret in the petstore namespace to provide Tekton with access to your Kubernetes cluster as follows:

    kubectl create secret generic kubeconfig-secret --from-file=kubeconfig=/path/to/.kube/config -n petstore\n

    Create an associated ClusterRole and ClusterRoleBinding as follows:

    kubectl apply -n petstore -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: kuadrant-ci-example-full-access\nrules:\n\n- apiGroups: [\"*\"]\n  resources: [\"*\"]\n  verbs: [\"*\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: kuadrant-ci-example-full-access-binding\nsubjects:\n- kind: ServiceAccount\n  name: default\n  namespace: petstore\nroleRef:\n  kind: ClusterRole\n  name: kuadrant-ci-example-full-access\n  apiGroup: rbac.authorization.k8s.io\nEOF\n
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-5-trigger-the-taskrun","title":"Step 5 - Trigger the TaskRun","text":"

    Execute the task from the petstore namespace, referencing the kubeconfig secret for cluster access as follows:

    This example runs this task with the Kuadrant Petstore app: https://github.com/kuadrant/api-petstore.

    kubectl apply -n petstore -f - <<EOF\napiVersion: tekton.dev/v1beta1\nkind: TaskRun\nmetadata:\n  name: run-kuadrantctl-taskrun\n  namespace: petstore\nspec:\n  taskRef:\n    name: run-kuadrantctl\n  params:\n\n    - name: gitRepoUrl\n      value: \"https://github.com/kuadrant/api-petstore.git\"\n    - name: gitRevision\n      value: \"main\"\n  workspaces:\n    - name: source\n      persistentVolumeClaim:\n        claimName: tekton-kuadrantctl-pvc\n    - name: kubeconfig\n      secret:\n        secretName: kubeconfig-secret\nEOF\n

    If you have tkn installed, you can easily view the progress of the task run as follows:

    tkn taskrun list -n petstore\nNAME                      STARTED          DURATION   STATUS\nrun-kuadrantctl-taskrun   12 seconds ago   ---        Running(Pending)\n
    tkn taskrun logs -n petstore -f\n\n\n[clone] Cloning into '/workspace/source'...\n[clone] Already on 'main'\n[clone] Your branch is up to date with 'origin/main'.\n\n[download-kuadrantctl]   % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n[download-kuadrantctl]                                  Dload  Upload   Total   Spent    Left  Speed\n  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\n100 21.4M  100 21.4M    0     0  6601k      0  0:00:03  0:00:03 --:--:-- 8756k\n\n[run-kuadrantctl] {\"kind\":\"AuthPolicy\",\"apiVersion\":\"kuadrant.io/v1beta2\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"targetRef\":{\"group\":\"gateway.networking.k8s.io\",\"kind\":\"HTTPRoute\",\"name\":\"petstore\",\"namespace\":\"petstore\"},\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}]}],\"rules\":{\"authentication\":{\"storeAdmin_api_key\":{\"credentials\":{\"customHeader\":{\"name\":\"api_key\"}},\"apiKey\":{\"selector\":{\"matchLabels\":{\"kuadrant.io/apikeys-by\":\"api_key\"}}},\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}]}]}}}},\"status\":{}}\n[run-kuadrantctl] 
{\"kind\":\"RateLimitPolicy\",\"apiVersion\":\"kuadrant.io/v1beta2\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"targetRef\":{\"group\":\"gateway.networking.k8s.io\",\"kind\":\"HTTPRoute\",\"name\":\"petstore\",\"namespace\":\"petstore\"},\"limits\":{\"getInventory\":{\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/inventory\"},\"method\":\"GET\"}]}],\"rates\":[{\"limit\":10,\"duration\":10,\"unit\":\"second\"}]},\"loginUser\":{\"routeSelectors\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/user/login\"},\"method\":\"GET\"}]}],\"rates\":[{\"limit\":2,\"duration\":10,\"unit\":\"second\"}]}}},\"status\":{}}\n[run-kuadrantctl] {\"kind\":\"HTTPRoute\",\"apiVersion\":\"gateway.networking.k8s.io/v1beta1\",\"metadata\":{\"name\":\"petstore\",\"namespace\":\"petstore\",\"creationTimestamp\":null,\"labels\":{\"deployment\":\"petstore\",\"owner\":\"jbloggs\"}},\"spec\":{\"parentRefs\":[{\"kind\":\"Gateway\",\"namespace\":\"kuadrant-multi-cluster-gateways\",\"name\":\"prod-web\"}],\"hostnames\":[\"petstore.${KUADRANT_ZONE_ROOT_DOMAIN}\"],\"rules\":[{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/user/login\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]},{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/admin\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]},{\"matches\":[{\"path\":{\"type\":\"Exact\",\"value\":\"/api/v3/store/inventory\"},\"method\":\"GET\"}],\"backendRefs\":[{\"name\":\"petstore\",\"namespace\":\"petstore\",\"port\":8080}]}]},\"status\":{\"parents\":null}}\n\n[apply-resources] authpolicy.kuadrant.io/petstore created\n[apply-resources] httproute.gateway.networking.k8s.io/petstore created\n[apply-resources] 
ratelimitpolicy.kuadrant.io/petstore created\n
    "},{"location":"kuadrantctl/doc/kuadrantctl-ci-cd/#step-6-cleanup","title":"Step 6 - Cleanup","text":"

    Clean up your resources as follows:

    1. Remove the petstore namespace:
    2. kubectl delete ns petstore
    3. Remove the ClusterRole and ClusterRoleBinding:
    4. kubectl delete clusterrole kuadrant-ci-example-full-access
    5. kubectl delete clusterrolebinding kuadrant-ci-example-full-access-binding
    "},{"location":"kuadrantctl/doc/openapi-apicurio/","title":"Using Apicurio Studio with Kuadrant OAS extensions","text":"

    You can use OpenAPI extensions to define extra functionality beyond what is covered by the standard OpenAPI specification. Extensions typically start with the x- prefix, for example, x-codegen. Kuadrant OpenAPI extensions start with the x-kuadrant prefix, and allow you to configure Kuadrant policy information alongside your API.

    Apicurio Studio is a UI tool for visualizing and editing OpenAPI designs and definitions, which can visualize security details and custom extensions specified in your OpenAPI definition.

    "},{"location":"kuadrantctl/doc/openapi-apicurio/#prerequisites","title":"Prerequisites","text":"
    • You have Apicurio Studio installed and running. For more information, see the Apicurio Studio documentation.
    "},{"location":"kuadrantctl/doc/openapi-apicurio/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/openapi-apicurio/#step-1-access-your-openapi-definition-in-apicurio-studio","title":"Step 1 - Access your OpenAPI definition in Apicurio Studio","text":"

    Open or import your OpenAPI definition in Apicurio Studio. On the Design tab, select the VENDOR-EXTENSIONS section to add an extension. Alternatively, you can use the Source tab to edit the API definition directly.

    "},{"location":"kuadrantctl/doc/openapi-apicurio/#step-2-add-kuadrant-extensions-to-your-api-definition","title":"Step 2 - Add Kuadrant extensions to your API definition","text":"

    The following configuration and extension points are supported by Apicurio Studio and the kuadrantctl CLI:

    "},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-an-http-route","title":"Generate an HTTP route","text":"

    To generate an HTTPRoute for the API, add the following x-kuadrant block to your API definition in Apicurio Studio, replacing values to match your API details and the location of your Gateway:

    x-kuadrant:\n    route:\n        name: petstore\n        namespace: petstore\n        hostnames:\n\n            - 'petstore.example.com'\n        parentRefs:\n            -   name: prod-web\n                namespace: kuadrant-multi-cluster-gateways\n                kind: Gateway\n

    For more details, see Generate Gateway API HTTPRoute object from OpenAPI 3.

    "},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-an-authpolicy","title":"Generate an AuthPolicy","text":"

    To generate an AuthPolicy, add a securityScheme to the components block in your API definition. The following securityScheme requires that an API key header is set:

        securitySchemes:\n        api_key:\n            type: apiKey\n            name: api_key\n            in: header\n

    Although securityScheme is not an OpenAPI extension, it is used by kuadrantctl like the other extensions mentioned in this document.

    When added, Apicurio Studio will display the following update in the SECURITY SCHEMES section:

    For more details, see Generate Kuadrant AuthPolicy object from OpenAPI 3.

    "},{"location":"kuadrantctl/doc/openapi-apicurio/#generate-a-ratelimitpolicy","title":"Generate a RateLimitPolicy","text":"

    To generate a RateLimitPolicy for the API, add the following x-kuadrant block to a path in your API definition, replacing values to match your API details.

    paths:\n    /:\n        x-kuadrant:\n            backendRefs:\n                -\n                    name: petstore\n                    namespace: petstore\n                    port: 8080\n            rate_limit:\n                rates:\n                    -\n                        limit: 10\n                        duration: 10\n                        unit: second\n

    When added, Apicurio Studio will display the following update in the VENDOR-EXTENSIONS section for that specific path:

    For more details, see Generate Kuadrant RateLimitPolicy object from OpenAPI 3.

    "},{"location":"kuadrantctl/doc/openapi-apicurio/#additional-resources","title":"Additional resources","text":"
    • OpenAPI 3.0.x Kuadrant Extensions in the kuadrantctl documentation.
    • Apicurio Studio - Now with OpenAPI Vendor Extensions.
    "},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/","title":"OpenAPI 3.0.x Kuadrant extensions","text":"

    This reference information shows examples of how to add Kuadrant extensions at the root, path, or operation level in an OpenAPI 3.0.x definition.

    "},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#root-level-kuadrant-extension","title":"Root-level Kuadrant extension","text":"

    You can add a Kuadrant extension at the root level of an OpenAPI definition. The following example shows an extension added for a petstore app:

    x-kuadrant:\n  route:  ## HTTPRoute metadata\n    name: \"petstore\"\n    namespace: \"petstore\"\n    labels:  ## map[string]string\n      deployment: petstore\n    hostnames:  ## []gateway.networking.k8s.io/v1beta1.Hostname\n\n      - example.com\n    parentRefs:  ## []gateway.networking.k8s.io/v1beta1.ParentReference\n      - name: apiGateway\n        namespace: gateways\n
    "},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#path-level-kuadrant-extension","title":"Path-level Kuadrant extension","text":"

    You can add a Kuadrant extension at the path level of an OpenAPI definition. This configuration at the path level is the default when there is no operation-level configuration. The following example shows an extension added for a /cat path:

    paths:\n  /cat:\n    x-kuadrant:  ## Path-level Kuadrant extension\n      disable: true  ## Remove from the HTTPRoute. Optional. Default: false\n      pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact\n      backendRefs:  ## Backend references to be included in the HTTPRoute. []gateway.networking.k8s.io/v1beta1.HTTPBackendRef. Optional.\n\n        - name: petstore\n          port: 80\n          namespace: petstore\n      rate_limit:  ## Rate limit configuration. Optional.\n        rates:   ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate\n          - limit: 1\n            duration: 10\n            unit: second\n        counters:   ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.CountextSelector\n          - auth.identity.username\n        when:   ## Kuadrant API []github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition\n          - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n            operator: eq\n            value: alice\n
    "},{"location":"kuadrantctl/doc/openapi-kuadrant-extensions/#operation-level-kuadrant-extension","title":"Operation-level Kuadrant extension","text":"

    You can add a Kuadrant extension at the operation level of an OpenAPI definition. This extension uses the same schema as the path-level Kuadrant extension. The following example shows an extension added for a get operation:

    paths:\n  /cat:\n    get:\n      x-kuadrant:  ## Operation-level Kuadrant extension\n        disable: true  ## Remove from the HTTPRoute. Optional. Default: path level \"disable\" value.\n        pathMatchType: Exact ## Specifies how to match against the path value. Valid values: [Exact;PathPrefix]. Optional. Default: Exact.\n        backendRefs:  ## Backend references to be included in the HTTPRoute. Optional.\n\n          - name: petstore\n            port: 80\n            namespace: petstore\n        rate_limit:  ## Rate limit configuration. Optional.\n          rates:   ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.Rate\n            - limit: 1\n              duration: 10\n              unit: second\n          counters:   ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.CountextSelector\n            - auth.identity.username\n          when:   ## Kuadrant API github.com/kuadrant/kuadrant-operator/api/v1beta2.WhenCondition\n            - selector: metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.identity.userid\n              operator: eq\n              value: alice\n
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/","title":"Integrating Kuadrant OAS extensions with Red Hat OpenShift Dev Spaces","text":"

    OpenAPI extensions enhance the standard OpenAPI specification by adding custom functionality. Kuadrant OpenAPI extensions are identified by the x-kuadrant prefix. You can use OpenAPI extensions to integrate Kuadrant policies directly into your API definitions.

    Red Hat OpenShift Dev Spaces provides a browser-based, cloud-native IDE that supports rapid and decentralized development in container-based environments. This tutorial demonstrates how to use OpenShift Dev Spaces to modify an OpenAPI definition by incorporating Kuadrant policies, and then use the kuadrantctl CLI to create Kubernetes resources for both Gateway API and Kuadrant.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#prerequisites","title":"Prerequisites","text":"
    • You must have access to one of the following Dev Spaces instances:

    • A self-hosted OpenShift Dev Spaces instance.

    • An OpenShift Dev Spaces instance provided by the Red Hat Developer Sandbox.
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#procedure","title":"Procedure","text":""},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-1-setting-up-your-workspace","title":"Step 1 - Setting up your workspace","text":"

    Create a workspace in Dev Spaces for your project as follows:

    1. Fork the following repository: https://github.com/Kuadrant/blank-petstore.
    2. In Dev Spaces, select Create Workspace, and enter the URL of your forked repository. For example: https://github.com/<your-username>/blank-petstore.git.
    3. Click Create & Open.
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-2-configuring-vs-code-in-dev-spaces","title":"Step 2 - Configuring VS Code in Dev Spaces","text":"

    For this tutorial, you will perform the following tasks:

    • Install kuadrantctl in your workspace to demonstrate Kubernetes resource generation from your modified OpenAPI definition.
    • Optional: Configure Git with your username and email to enable pushing changes back to your repository.
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#install-the-kuadrantctl-cli","title":"Install the kuadrantctl CLI","text":"

    To install kuadrantctl in your Dev Spaces workspace, enter the following command:

    curl -sL \"https://github.com/kuadrant/kuadrantctl/releases/download/v0.2.3/kuadrantctl-v0.2.3-linux-amd64.tar.gz\" | tar xz -C /home/user/.local/bin\n

    This command installs kuadrantctl in /home/user/.local/bin, which is included in the container's $PATH by default.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#optional-configuring-git","title":"Optional: Configuring Git","text":"

    If you plan to push changes back to your repository, configure your Git username and email as follows:

    git config --global user.email \"foo@example.com\"\ngit config --global user.name \"Foo Example\"\n
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-3-adding-kuadrant-policies-to-your-openapi-definition","title":"Step 3 - Adding Kuadrant policies to your OpenAPI definition","text":"

    After creating your workspace, Dev Spaces will launch VS Code loaded with your forked repository. Navigate to the openapi.yaml file in the sample app to begin modifications.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#kuadrant-policies-overview","title":"Kuadrant policies overview","text":"

    You will enhance your API definition by applying Kuadrant policies to the following endpoints:

    • /pet/findByStatus
    • /user/login
    • /store/inventory

    In this tutorial, you will add Kuadrant policies to your API definition as follows:

    • Generate an HTTPRoute to expose these three routes for an existing Gateway.
    • Add API key authentication for the /user/login route, using a Kuadrant AuthPolicy and OAS securitySchemes.
    • Add a Kuadrant RateLimitPolicy to the /store/inventory endpoint, to limit the amount of requests this endpoint can receive.
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#defining-a-gateway","title":"Defining a Gateway","text":"

    Use the x-kuadrant extension in the root level to specify a Gateway. This information will be used to generate HTTPRoutes at the path level. For example:

    x-kuadrant:\n  route:  ## HTTPRoute metadata\n    name: \"petstore\"\n    namespace: \"petstore\"\n    labels:  ## map[string]string\n      deployment: petstore\n    hostnames:  ## []gateway.networking.k8s.io/v1beta1.Hostname\n\n      - example.com\n    parentRefs:  ## []gateway.networking.k8s.io/v1beta1.ParentReference\n      - name: apiGateway\n        namespace: gateways\n
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#specifying-httproutes-for-each-path","title":"Specifying HTTPRoutes for each path","text":"

    For each path, add an x-kuadrant extension with backendRefs to link your routes to your paths as follows:

      /pet/findByStatus:\n    x-kuadrant:\n      backendRefs:\n\n      - name: petstore\n        namespace: petstore\n        port: 8080\n    get:\n      # ...\n
      /user/login:\n    x-kuadrant:\n      backendRefs:\n\n      - name: petstore\n        namespace: petstore\n        port: 8080\n    get:\n      # ...\n
      /store/inventory:\n    x-kuadrant:\n      backendRefs:\n\n      - name: petstore\n        namespace: petstore\n        port: 8080\n    get:\n      # ...\n

    Note: The x-kuadrant extension at the path level applies to all HTTP methods defined in the path. For method-specific policies, move the extension inside the relevant HTTP method block, for example, get or post.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#implementing-authpolicy-and-security-schemes","title":"Implementing AuthPolicy and security schemes","text":"

    To secure the /user/login endpoint with API key authentication, use the following configuration:

      /user/login:\n    # ...\n    get:\n      security:\n\n      - api_key: []\n
    components:\n  schemas:\n    # ...\n  securitySchemes:\n    api_key:\n      type: apiKey\n      name: api_key\n      in: header\n

    This configuration generates an AuthPolicy that references an API key stored in a labeled Secret:

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: petstore-api-key\n  namespace: petstore\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    kuadrant.io/apikeys-by: api_key\nstringData:\n  api_key: secret\ntype: Opaque\n
    For simplicity, this example uses a simple, static API key for your app.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#applying-a-ratelimitpolicy-to-an-endpoint","title":"Applying a RateLimitPolicy to an endpoint","text":"

    To enforce rate limiting on the /store/inventory endpoint, add the following x-kuadrant extension:

      /store/inventory:\n    get:\n      # ...\n      x-kuadrant:\n        backendRefs:\n          # ...\n        rate_limit:\n          rates:\n\n          - limit: 10\n            duration: 10\n            unit: second\n

    This limits to 10 requests every 10 seconds for the /store/inventory endpoint.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-4-generate-kubernetes-resources-by-using-kuadrantctl","title":"Step 4 - Generate Kubernetes resources by using kuadrantctl","text":"

    With your extensions in place, you can use kuadrantctl to generate the following Kubernetes resources:

    • An HTTPRoute for your petstore app for each of your endpoints.
    • An AuthPolicy with a simple, static API key from a secret for the /user/login endpoint.
    • A RateLimitPolicy with a rate limit of 10 requests every 10 seconds for the /store/inventory endpoint.

    In Dev Spaces, select \u2630 > Terminal > New Terminal, and run the following commands:

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-an-httproute","title":"Generate an HTTPRoute","text":"
    kuadrantctl generate gatewayapi httproute --oas openapi.yaml\n

    This command outputs the following HTTPRoute:

    kind: HTTPRoute\napiVersion: gateway.networking.k8s.io/v1beta1\nmetadata:\n  name: petstore\n  namespace: petstore\n  creationTimestamp: null\n  labels:\n    deployment: petstore\nspec:\n  parentRefs:\n\n    - namespace: gateways\n      name: apiGateway\n  hostnames:\n    - example.com\n  rules:\n    - matches:\n        - path:\n            type: Exact\n            value: /api/v3/pet/findByStatus\n          method: GET\n      backendRefs:\n        - name: petstore\n          namespace: petstore\n          port: 8080\n    - matches:\n        - path:\n            type: Exact\n            value: /api/v3/store/inventory\n          method: GET\n      backendRefs:\n        - name: petstore\n          namespace: petstore\n          port: 8080\n    - matches:\n        - path:\n            type: Exact\n            value: /api/v3/user/login\n          method: GET\n      backendRefs:\n        - name: petstore\n          namespace: petstore\n          port: 8080\nstatus:\n  parents: null\n
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-an-authpolicy","title":"Generate an AuthPolicy","text":"
    kuadrantctl generate kuadrant authpolicy --oas openapi.yaml\n

    This command outputs the following AuthPolicy:

    apiVersion: kuadrant.io/v1beta2\nkind: AuthPolicy\nmetadata:\n  name: petstore\n  namespace: petstore\n  creationTimestamp: null\n  labels:\n    deployment: petstore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: petstore\n    namespace: petstore\n  routeSelectors:\n\n    - matches:\n        - path:\n            type: Exact\n            value: /api/v3/user/login\n          method: GET\n  rules:\n    authentication:\n      GETuserlogin_api_key:\n        credentials:\n          customHeader:\n            name: api_key\n        apiKey:\n          selector:\n            matchLabels:\n              kuadrant.io/apikeys-by: api_key\n        routeSelectors:\n          - matches:\n              - path:\n                  type: Exact\n                  value: /api/v3/user/login\n                method: GET\nstatus: {}\n
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#generate-a-ratelimitpolicy","title":"Generate a RateLimitPolicy","text":"
    kuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml\n

    This command outputs the following RateLimitPolicy:

    apiVersion: kuadrant.io/v1beta2\nkind: RateLimitPolicy\nmetadata:\n  name: petstore\n  namespace: petstore\n  creationTimestamp: null\n  labels:\n    deployment: petstore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: petstore\n    namespace: petstore\n  limits:\n    GETstoreinventory:\n      routeSelectors:\n\n        - matches:\n            - path:\n                type: Exact\n                value: /api/v3/store/inventory\n              method: GET\n      rates:\n        - limit: 10\n          duration: 10\n          unit: second\nstatus: {}\n
    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#step-5-applying-resources-to-the-app","title":"Step 5 - Applying resources to the app","text":"

    Note: By default, the oc and kubectl commands in Dev Spaces target the cluster running Dev Spaces. If you want to apply resources to another cluster, you must log in to that cluster with oc or kubectl and pass a different --context to these commands.

    You can now apply these policies to a running app by using kubectl or oc. If Dev Spaces is running on a cluster where Kuadrant is also installed, you can apply these resources as follows:

    kuadrantctl generate gatewayapi httproute --oas openapi.yaml | kubectl apply -f -\nkuadrantctl generate kuadrant authpolicy --oas openapi.yaml | kubectl apply -f -\nkuadrantctl generate kuadrant ratelimitpolicy --oas openapi.yaml | kubectl apply -f -\n

    Alternatively, you can use kuadrantctl as part of a CI/CD pipeline. For more details, see the kuadrantctl CI/CD guide.

    If you completed the optional Git configuration step, you can enter git commit to commit these changes and push them to your fork.

    "},{"location":"kuadrantctl/doc/openapi-openshift-dev-spaces/#additional-resources","title":"Additional resources","text":"

    For more details, see the following documentation on using x-kuadrant OAS extensions with kuadrantctl:

    • OpenAPI 3.0.x Kuadrant extensions
    • Generate Gateway API HTTPRoutes with kuadrantctl
    • Generate Kuadrant AuthPolicy with kuadrantctl
    • Generate Kuadrant RateLimitPolicy with kuadrantctl
    • kuadrantctl CI/CD guide
    "},{"location":"dns-operator/","title":"DNS Operator","text":"

    The DNS Operator is a Kubernetes-based controller responsible for reconciling DNS Record custom resources. It interfaces with cloud DNS providers such as AWS, Google and Azure to bring the DNS zone into the state declared in these CRDs. One of the key use cases the DNS Operator solves is allowing complex DNS routing strategies such as Geo and Weighted to be expressed. This allows you to leverage DNS as the first layer of traffic management. These strategies increase in value as you work across multiple clusters. The DNS Operator can be deployed to multiple clusters and coordinate on a given zone, allowing you to use a shared domain name to balance traffic based on your requirements.

    "},{"location":"dns-operator/#getting-started","title":"Getting Started","text":""},{"location":"dns-operator/#pre-setup","title":"Pre Setup","text":""},{"location":"dns-operator/#add-dns-provider-configuration","title":"Add DNS provider configuration","text":"

    NOTE: You can optionally skip this step but at least one DNS Provider Secret will need to be configured with valid credentials to use the DNS Operator.

    "},{"location":"dns-operator/#aws-provider-route53","title":"AWS Provider (Route53)","text":"

    make local-setup-aws-clean local-setup-aws-generate AWS_ACCESS_KEY_ID=<My AWS ACCESS KEY> AWS_SECRET_ACCESS_KEY=<My AWS Secret Access Key>\n
    More details about the AWS provider can be found here

    "},{"location":"dns-operator/#gcp-provider","title":"GCP Provider","text":"

    make local-setup-gcp-clean local-setup-gcp-generate GCP_GOOGLE_CREDENTIALS='<My GCP Credentials.json>' GCP_PROJECT_ID=<My GCP PROJECT ID>\n
    More details about the GCP provider can be found here

    "},{"location":"dns-operator/#azure-provider","title":"AZURE Provider","text":"
    make local-setup-azure-clean local-setup-azure-generate KUADRANT_AZURE_CREDENTIALS='<My Azure Credentials.json>'\n

    Info on generating service principal credentials here

    Get your resource group ID like so:

    az group show --resource-group <resource group name> | jq \".id\" -r\n

    Also give traffic manager contributor role:

    az role assignment create --role \"Traffic Manager Contributor\" --assignee $EXTERNALDNS_SP_APP_ID --scope <RESOURCE_GROUP_ID>\n

    Getting the zone ID can be achieved using the below command:

    az network dns zone show --name <my domain name> --resource-group <my resource group> --query \"{id:id,domain:name}\"\n

    "},{"location":"dns-operator/#running-controller-locally-default","title":"Running controller locally (default)","text":"
    1. Create local environment(creates kind cluster)

      make local-setup\n

    2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):

    make run\n
    "},{"location":"dns-operator/#running-controller-on-the-cluster","title":"Running controller on the cluster","text":"
    1. Create local environment(creates kind cluster)

      make local-setup DEPLOY=true\n

    2. Verify controller deployment

      kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system\n

    "},{"location":"dns-operator/#running-controller-on-existing-cluster","title":"Running controller on existing cluster","text":"

    You\u2019ll need a Kubernetes cluster to run against. You can use KIND to get a local cluster for testing, or run against a remote cluster. Note: Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster kubectl cluster-info shows).

    1. Apply Operator manifests

      kustomize build config/default | kubectl apply -f -\n

    2. Verify controller deployment

      kubectl logs -f deployments/dns-operator-controller-manager -n dns-operator-system\n

    "},{"location":"dns-operator/#development","title":"Development","text":""},{"location":"dns-operator/#e2e-test-suite","title":"E2E Test Suite","text":"

    The e2e test suite can be executed against any cluster running the DNS Operator with configuration added for any supported provider.

    make test-e2e TEST_DNS_ZONE_DOMAIN_NAME=<My domain name> TEST_DNS_PROVIDER_SECRET_NAME=<My provider secret name> TEST_DNS_NAMESPACES=<My test namespace(s)>\n
    Environment Variable Description TEST_DNS_PROVIDER_SECRET_NAME Name of the provider secret to use. If using local-setup provider secrets zones, one of [dns-provider-credentials-aws; dns-provider-credentials-gcp;dns-provider-credentials-azure] TEST_DNS_ZONE_DOMAIN_NAME The Domain name to use in the test. Must be a zone accessible with the (TEST_DNS_PROVIDER_SECRET_NAME) credentials with the same domain name TEST_DNS_NAMESPACES The namespace(s) where the provider secret(s) can be found"},{"location":"dns-operator/#modifying-the-api-definitions","title":"Modifying the API definitions","text":"

    If you are editing the API definitions, generate the manifests such as CRs or CRDs using:

    make manifests\n

    NOTE: Run make --help for more information on all potential make targets

    More information can be found via the Kubebuilder Documentation

    "},{"location":"dns-operator/#logging","title":"Logging","text":"

    Logs are following the general guidelines:

    • logger.Info() describes a high-level state of the resource such as creation, deletion and which reconciliation path was taken.
    • logger.Error() describes only those errors that are not returned in the result of the reconciliation. If an error occurs, there should be only one error message.
    • logger.V(1).Info() provides debug-level logs that give information about every change or event caused by the resource, as well as every update of the resource.

    The --zap-devel argument will enable debug level logs for the output. Otherwise, all V() logs are ignored.

    "},{"location":"dns-operator/#common-metadata","title":"Common metadata","text":"

    Not exhaustive list of metadata for DNSRecord controller:

    • level - logging level. Values are: info, debug or error
    • ts - timestamp
    • logger - logger name
    • msg
    • controller and controllerKind - the controller name and its kind, respectively, that produced the log
    • DNSRecord - name and namespace of the DNS Record CR that is being reconciled
    • reconcileID
    • ownerID - ID of the owner of the DNS Record
    • txtPrefix/txtSuffix - prefix and suffix of the TXT record in provider.
    • zoneEndpoints - endpoints that exist in the provider
    • specEndpoints - endpoints defined in the spec
    • statusEndpoints - endpoints that were processed previously

    Note that not all the metadata values are present at each of the logs statements.

    "},{"location":"dns-operator/#examples","title":"Examples","text":"

    To query logs locally you can use jq. For example: Retrieve logs by

    kubectl get deployments -l app.kubernetes.io/part-of=dns-operator -A\n\nNAMESPACE             NAME                              READY \ndns-operator-system   dns-operator-controller-manager   1/1   \n
    And query them. For example:
    kubectl logs -l control-plane=dns-operator-controller-manager -n dns-operator-system --tail -1 | sed '/^{/!d' | jq 'select(.controller==\"dnsrecord\" and .level==\"info\")'\n
    or
    kubectl logs -l control-plane=dns-operator-controller-manager -n dns-operator-system --tail -1 | sed '/^{/!d' | jq 'select(.controller==\"dnsrecord\" and .DNSRecord.name==\"test\" and .reconcileID==\"2be16b6d-b90f-430e-9996-8b5ec4855d53\")' | jq '.level, .msg, .zoneEndpoints, .specEndpoints, .statusEndpoints '\n
    You could use selector in the jq with and/not/or to restrict.

    "},{"location":"dns-operator/#license","title":"License","text":"

    Copyright 2024.

    Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0\n

    Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

    "},{"location":"dns-operator/docs/RELEASE/","title":"RELEASE","text":""},{"location":"dns-operator/docs/RELEASE/#release","title":"Release","text":""},{"location":"dns-operator/docs/RELEASE/#new-majorminor-version","title":"New Major.Minor version","text":"
    1. Create a new minor release branch from the HEAD of main:
      git checkout -b release-0.2\n
    2. Run prepare release:
      make prepare-release IMG_TAG=release-0.2 VERSION=0.2.0-dev CHANNELS=alpha REPLACES_VERSION=0.1.0\n
    3. Verify local changes, commit and push:
      git add .\ngit commit -m \"prepare-release: release-0.2\"\ngit push upstream release-0.2\n
    4. Verify that the build image workflow is triggered and completes for the new branch

    5. Do any final testing and bug fixing against the release branch, see Verify OLM Deployment

    6. Run prepare release for final version

      make prepare-release VERSION=0.2.0 CHANNELS=stable REPLACES_VERSION=0.1.0\n

    7. Verify local changes, commit, push and tag:
      git add .\ngit commit -m \"prepare-release: v0.2.0\"\ngit tag v0.2.0\ngit push upstream release-0.2\ngit push upstream v0.2.0\n
    8. Verify that the build release tag workflow is triggered and completes for the new tag

    9. Verify the new version can be installed from the catalog image, see Verify OLM Deployment

    10. Release to the community operator index catalogs.

    "},{"location":"dns-operator/docs/RELEASE/#new-patch-version","title":"New Patch version","text":"
    1. Checkout minor release branch:
      git checkout release-0.2\n
    2. Run prepare release:
      make prepare-release VERSION=0.2.1 CHANNELS=stable REPLACES_VERSION=0.2.0\n
    3. Verify local changes, commit and push:
      git add .\ngit commit -m \"prepare-release: v0.2.1\"\ngit tag v0.2.1\ngit push upstream release-0.2\ngit push upstream v0.2.1\n
    4. Verify that the build release tag workflow is triggered and completes for the new tag

    5. Verify the new version can be installed from the catalog image, see Verify OLM Deployment

    6. Release to the community operator index catalogs.

    "},{"location":"dns-operator/docs/RELEASE/#verify-olm-deployment","title":"Verify OLM Deployment","text":"
    1. Deploy the OLM catalog image:

      make local-setup install-olm deploy-catalog\n

    2. Wait for deployment:

      kubectl -n dns-operator-system wait --timeout=60s --for=condition=Available deployments --all\ndeployment.apps/dns-operator-controller-manager condition met\n

    3. Check the logs:

      kubectl -n dns-operator-system logs -f deployment/dns-operator-controller-manager\n

    4. Check the version:

      $ kubectl -n dns-operator-system get deployment dns-operator-controller-manager --show-labels\nNAME                              READY   UP-TO-DATE   AVAILABLE   AGE     LABELS\ndns-operator-controller-manager   1/1     1            1           5m42s   app.kubernetes.io/component=manager,app.kubernetes.io/created-by=dns-operator,\napp.kubernetes.io/instance=controller-manager,app.kubernetes.io/managed-by=kustomize,app.kubernetes.io/name=deployment,app.kubernetes.io/part-of=dns-operator,\ncontrol-plane=dns-operator-controller-manager,olm.deployment-spec-hash=1jPe8AuMpSKHh51nnDs4j25ZgoUrKhF45EP0Wa,olm.managed=true,olm.owner.kind=ClusterServiceVersion,\nolm.owner.namespace=dns-operator-system,olm.owner=dns-operator.v0.2.0-dev,operators.coreos.com/dns-operator.dns-operator-system=\n

    "},{"location":"dns-operator/docs/RELEASE/#community-operator-index-catalogs","title":"Community Operator Index Catalogs","text":"
    • Operatorhub Community Operators
    • Openshift Community Operators
    "},{"location":"dns-operator/docs/provider/","title":"Configuring a DNS Provider","text":"

    In order to be able to interact with supported DNS providers, Kuadrant needs a credential that it can use.

    "},{"location":"dns-operator/docs/provider/#supported-providers","title":"Supported Providers","text":"

    Kuadrant currently supports the following DNS providers

    • AWS Route 53 (aws)
    • Google Cloud DNS (gcp)
    • Azure (azure)
    "},{"location":"dns-operator/docs/provider/#aws-route-53-provider","title":"AWS Route 53 Provider","text":"

    Kuadrant expects a Secret with a credential. Below is an example for AWS Route 53. It is important to set the secret type to aws:

    kubectl create secret generic my-aws-credentials \\\n  --namespace=kuadrant-dns-system \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=XXXX \\\n  --from-literal=AWS_REGION=eu-west-1 \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=XXX\n
    Key Example Value Description AWS_REGION eu-west-1 AWS Region AWS_ACCESS_KEY_ID XXXX AWS Access Key ID (see note on permissions below) AWS_SECRET_ACCESS_KEY XXXX AWS Secret Access Key"},{"location":"dns-operator/docs/provider/#aws-iam-permissions-required","title":"AWS IAM Permissions Required","text":"

    We have tested using the available policy AmazonRoute53FullAccess however it should also be possible to restrict the credential down to a particular zone. More info can be found in the AWS docs:

    https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/access-control-managing-permissions.html

    By default, Kuadrant will list the available zones and find the matching zone based on the listener host in the gateway listener. If it finds more than one matching zone for a given listener host, it will not update any of those zones. When providing a credential, you should limit it to write access for only the zones you want Kuadrant to manage. Below is an example of an AWS policy that grants this type of scoped access:

    {\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"VisualEditor0\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListTagsForResources\",\n                \"route53:GetHealthCheckLastFailureReason\",\n                \"route53:GetHealthCheckStatus\",\n                \"route53:GetChange\",\n                \"route53:GetHostedZone\",\n                \"route53:ChangeResourceRecordSets\",\n                \"route53:ListResourceRecordSets\",\n                \"route53:GetHealthCheck\",\n                \"route53:UpdateHostedZoneComment\",\n                \"route53:UpdateHealthCheck\",\n                \"route53:CreateHealthCheck\",\n                \"route53:DeleteHealthCheck\",\n                \"route53:ListTagsForResource\",\n                \"route53:ListHealthChecks\",\n                \"route53:GetGeoLocation\",\n                \"route53:ListGeoLocations\",\n                \"route53:ListHostedZonesByName\",\n                \"route53:GetHealthCheckCount\"\n            ],\n            \"Resource\": [\n                \"arn:aws:route53:::hostedzone/Z08187901Y93585DDGM6K\",\n                \"arn:aws:route53:::healthcheck/*\",\n                \"arn:aws:route53:::change/*\"\n            ]\n        },\n        {\n            \"Sid\": \"VisualEditor1\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListHostedZones\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n
    "},{"location":"dns-operator/docs/provider/#google-cloud-dns-provider","title":"Google Cloud DNS Provider","text":"

    Kuadrant expects a Secret with a credential. Below is an example for Google Cloud DNS. It is important to set the secret type to gcp:

    kubectl create secret generic my-test-gcp-credentials \\\n  --namespace=kuadrant-dns-system \\\n  --type=kuadrant.io/gcp \\\n  --from-literal=PROJECT_ID=xxx \\\n  --from-file=GOOGLE=$HOME/.config/gcloud/application_default_credentials.json\n
    Env Var Example Value Description GOOGLE {\"client_id\": \"***\",\"client_secret\": \"***\",\"refresh_token\": \"***\",\"type\": \"authorized_user\"} This is the JSON created from either the credential created by the gcloud CLI, or the JSON from the Service account PROJECT_ID my_project_id ID to the Google project"},{"location":"dns-operator/docs/provider/#google-cloud-dns-access-permissions-required","title":"Google Cloud DNS Access permissions required","text":"

    We have tested with the dns.admin role. See for more details:

    https://cloud.google.com/dns/docs/access-control#dns.admin

    "},{"location":"dns-operator/docs/provider/#azure-cloud-dns-provider","title":"Azure Cloud DNS Provider","text":"

    Kuadrant expects a Secret with a credential. Below is an example for Azure. It is important to set the secret type to azure:

    We recommend creating a new service principal for managing DNS. Azure Service Principal Docs

    # Create the service principal\n$ DNS_NEW_SP_NAME=kuadrantDnsPrincipal\n$ DNS_SP=$(az ad sp create-for-rbac --name $DNS_NEW_SP_NAME)\n$ DNS_SP_APP_ID=$(echo $DNS_SP | jq -r '.appId')\n$ DNS_SP_PASSWORD=$(echo $DNS_SP | jq -r '.password')\n
    "},{"location":"dns-operator/docs/provider/#azure-cloud-dns-access-permissions-required","title":"Azure Cloud DNS Access permissions required","text":"

    You will need to grant read and contributor access to the zone(s) you want managed for the service principal you are using.

    1) fetch DNS id used to grant access to the service principal

    DNS_ID=$(az network dns zone show --name example.com \\\n --resource-group ExampleDNSResourceGroup --query \"id\" --output tsv)\n\n# get your resource group id\n\nRESOURCE_GROUP_ID=$(az group show --resource-group ExampleDNSResourceGroup | jq \".id\" -r)\n
    "},{"location":"dns-operator/docs/provider/#provide-reader-access-to-the-resource-group","title":"provide reader access to the resource group","text":"

    $ az role assignment create --role \"Reader\" --assignee $DNS_SP_APP_ID --scope $DNS_ID

    "},{"location":"dns-operator/docs/provider/#provide-contributor-access-to-dns-zone-itself","title":"provide contributor access to DNS Zone itself","text":"

    $ az role assignment create --role \"Contributor\" --assignee $DNS_SP_APP_ID --scope $DNS_ID

    As we are setting up advanced traffic rules for GEO and Weighted responses you will also need to grant traffic manager access:

    az role assignment create --role \"Traffic Manager Contributor\" --assignee $DNS_SP_APP_ID --scope $RESOURCE_GROUP_ID\n
    cat <<-EOF > /local/path/to/azure.json\n{\n  \"tenantId\": \"$(az account show --query tenantId -o tsv)\",\n  \"subscriptionId\": \"$(az account show --query id -o tsv)\",\n  \"resourceGroup\": \"ExampleDNSResourceGroup\",\n  \"aadClientId\": \"$DNS_SP_APP_ID\",\n  \"aadClientSecret\": \"$DNS_SP_PASSWORD\"\n}\nEOF\n

    Finally, set up the secret with the azure.json credentials file

    kubectl create secret generic my-test-azure-credentials \\\n  --namespace=kuadrant-dns-system \\\n  --type=kuadrant.io/azure \\\n  --from-file=azure.json=/local/path/to/azure.json\n
    "},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/","title":"List of issues","text":"
    • Re-queue validation intermittently GH-36
    • Re-queue DNS Record whenever a write to the Cloud Provider occurs GH-35
    • Schedule removal of finalizer from DNS Records GH-38
    • Record write attempts in status for current generation GH-34
    "},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#the-idea","title":"The idea","text":"

    We now will constantly reconcile DNS records. The reasoning is that other controllers may override/change records in the DNS provider so there is a need to requeue the DNS Record from time to time even when no local changes are introduced.

    "},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#details","title":"Details","text":"

    There are a few new fields on the DNS Record status:

    • QueuedAt is a time when the DNS record was received for the reconciliation
    • ValidFor indicates the duration since the last reconciliation we consider data in the record to be valid
    • WriteCounter represents a number of consecutive write attempts on the same generation of the record. It is being reset to 0 when the generation changes or there are no changes to write.

    There is an option to override the ValidFor and DefaultRequeueTime with valid-for and requeue-time flags respectively.

    The DefaultRequeueTime is the duration between successful validation and the next reconciliation to ensure that the record is still up-to-date.

    The ValidFor is used to determine if we should do a full reconciliation when we get the record. If the record is still valid we will only update finalizers and validate the record itself. It will not perform anything that involves a DNS provider.

    "},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#dns-record-normal-lifecycle","title":"DNS Record normal lifecycle","text":"

    Once we enqueue the DNS record, controller will compile a list of changes to the DNS provider and will apply it. After this, the record is enqueued with the validationRequeueTime and the Ready condition will be marked as false with a message Awaiting Validation. When the record is received again and the controller ensures there are no changes needed (the ones applied are present in the DNS Provider) it sets the Ready condition to true and enqueues it with the defaultRequeueTime.

    Upon deletion, the process will be similar. The controller will determine the changes needed to the DNS provider and will apply them. The record will be requeued with the validationRequeueTime. Once we receive it back and ensure that there are no changes needed for the DNS provider we remove the finalizer from the record.

    The validationRequeueTime duration is randomized +/- 50%.

    "},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#when-things-go-south","title":"When things go south","text":"

    If the record is received prematurely - the ValidFor + QueuedAt is more than the current time - we requeue it again for the ValidFor duration.

    When we encounter an error during reconciliation, we will not requeue the record and will put an appropriate error message in the log and on the record. In order for it to reconcile again, there must be a change to the DNS Record CR.

    It is possible for a user to mess with the timestamps field or the ValidFor field. Kubernetes will not let setting an invalid value to the timestamp fields. Once the timestamp fields are set manually it will trigger reconciliation since there is a change in the record CR. The only one that could impact the controller is the QueuedAt field and the controller will believe that to be the last time the record was reconciled. As for the ValidFor: since it is a simple string it is possible to set an incorrect value. If we fail to parse it we treat the ValidFor as 0. This means that the controller will believe that the information in the record is expired and will probe the DNS provider for an update. If a valid value is provided controller will obey it. Eventually, the controller will naturally enqueue the record and those values will be overridden.

    In case the controller fails to retain changes in the DNS Provider (writes are successful, but the validation fails again) and the WriteCounter reaches the WriteCounterLimit, we give up on the reconciliation. The appropriate message will be put under the Ready - false condition as well as in the logs of the controller. The reconciliation will resume once the generation of the DNS Record is changed.

    "},{"location":"dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/#metrics","title":"Metrics","text":"

    There is a metric emitted from the controller: dns_provider_write_counter. It reflects the WriteCounter field in the status of the record.

    "},{"location":"dns-operator/docs/reference/dnsrecord/","title":"The DNSRecord Custom Resource Definition (CRD)","text":"
    • DNSRecord
    • DNSRecordSpec
    • DNSRecordStatus
    "},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecord","title":"DNSRecord","text":"Field Type Required Description spec DNSRecordSpec Yes The specification for DNSRecord custom resource status DNSRecordStatus No The status for the custom resource"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecordspec","title":"DNSRecordSpec","text":"Field Type Required Description ownerID String No Unique string used to identify the owner of this record. If unset an ownerID will be generated based on the record UID rootHost String Yes Single root host of all endpoints in a DNSRecord providerRef ProviderRef Yes Reference to a DNS Provider Secret endpoints []ExternalDNS Endpoint No Endpoints to manage in the dns provider healthCheck HealthCheckSpec No Health check configuration"},{"location":"dns-operator/docs/reference/dnsrecord/#providerref","title":"ProviderRef","text":"Field Type Required Description name String Yes Name of a dns provider secret"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description endpoint String Yes Endpoint is the path to append to the host to reach the expected health check port Number Yes Port to connect to the host on protocol String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy"},{"location":"dns-operator/docs/reference/dnsrecord/#dnsrecordstatus","title":"DNSRecordStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. 
Use it to check if the status info is up to date with latest resource spec conditions []Kubernetes meta/v1.Condition List of conditions that define the status of the resource queuedAt Kubernetes meta/v1.Time QueuedAt is a time when DNS record was received for the reconciliation validFor String ValidFor indicates duration since the last reconciliation we consider data in the record to be valid writeCounter Number WriteCounter represent a number of consecutive write attempts on the same generation of the record endpoints []ExternalDNS Endpoint Endpoints are the last endpoints that were successfully published by the provider healthCheck HealthCheckStatus Health check status ownerID String Unique string used to identify the owner of this record"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the health checks probes []HealthCheckStatusProbe Health check Probe status"},{"location":"dns-operator/docs/reference/dnsrecord/#healthcheckstatusprobe","title":"HealthCheckStatusProbe","text":"Field Type Description id String The health check id ipAddress String The ip address being monitored host String The host being monitored synced Boolean Synced conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the probe"}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"

    Kuadrant combines Gateway API with gateway providers like Istio and Envoy Gateway to enhance application connectivity. It enables platform engineers and application developers to easily connect, secure, and protect their services and infrastructure across multiple clusters with policies for TLS, DNS, application authentication & authorization, and rate limiting. Additionally, Kuadrant offers observability templates to further support infrastructure management.

    "},{"location":"#getting-started","title":"Getting Started","text":"

    For a quick setup of Kuadrant, see our Getting Started guide. Alternatively, explore the architecture in our Architectural Overview.

    "},{"location":"getting-started/","title":"Getting Started","text":"

    This guide lets you quickly evaluate Kuadrant. You will need a Kubernetes cluster to try out Kuadrant. If you prefer, you can use the following steps to set up a local kind cluster.

    "},{"location":"getting-started/#kind-cluster-setup","title":"Kind Cluster Setup","text":"
    kind create cluster\n

    To use Kuadrant, the LoadBalancer service type is required for Gateways. kind does not have any built-in way to provide IP addresses to these service types. You can follow this guide to set up a LoadBalancer provider for kind.

    "},{"location":"getting-started/#installation-options","title":"Installation Options","text":"
    • Install with Helm
    • Install with OLM
    "},{"location":"getting-started/#further-reading","title":"Further Reading","text":"

    The documentation on this site follows the Di\u00e1taxis framework to better serve you, our users. This approach also helps us create new content and maintain existing material effectively. Under this framework, all content falls into one of four categories, accessible from the side navigation:

    • Concepts - (also called 'Explanations') Deepens and broadens your understanding of Kuadrant.
    • APIs & Reference - Provides concise descriptions of Kuadrant APIs for quick consultation.
    • Tutorials - Offers practical, step-by-step activities for you to safely try out.
    • Guides - Delivers goal-oriented instructions to help you solve specific problems in any environment.
    "},{"location":"install-helm/","title":"Install with Helm","text":""},{"location":"install-helm/#prerequisites","title":"Prerequisites","text":"
    • Kubernetes cluster with support for services of type LoadBalancer
    • kubectl CLI
    "},{"location":"install-helm/#basic-installation","title":"Basic Installation","text":"

    The latest helm installation instructions for the kuadrant operator are maintained at https://artifacthub.io/packages/helm/kuadrant/kuadrant-operator.

    After installing the operator, you can create a Kuadrant resource to install the operand components.

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta1\nkind: Kuadrant\nmetadata:\n  name: kuadrant\n  namespace: kuadrant-system\nEOF\n

    If everything went well, the status of the resource should be Ready

    kubectl get kuadrant kuadrant -n kuadrant-system -o=jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'\n
    "},{"location":"install-helm/#next-steps","title":"Next Steps","text":"
    • Try out our Secure, protect, and connect guide
    "},{"location":"install-olm/","title":"Install and Configure Kuadrant and Sail via OLM using the kubectl CLI","text":"

    This document will walk you through setting up the required configuration to install Kuadrant using kustomize, or a tool that leverages kustomize such as kubectl, along with OLM. It will also go through more advanced configuration options to enable building up a resilient configuration. You can view the full configuration built here: Full AWS Example.

    1. Basic Install

    2. Configure DNS and TLS integration

    3. External Redis for Rate Limit Counters

    4. Limitador Resilient Configuration

    5. Authorino Resilient Configuration

    6. [OpenShift Specific] Setup Observability

    "},{"location":"install-olm/#prerequisites","title":"Prerequisites","text":"
    • Kubernetes (or OpenShift) cluster with support for services of type LoadBalancer
    • kubectl CLI
    • OLM installed - (operator lifecycle manager releases)
    • Gateway provider installed
      • If you don't have a Gateway provider installed, steps are included in this guide to install the Sail Operator that will configure and install an Istio installation. Kuadrant is intended to work with Istio or Envoy Gateway.
    • (Optional) cert-manager for automated TLS capabilities:
      • cert-manager Operator for Red Hat OpenShift
      • installing cert-manager via OperatorHub
    • (Optional) Access to AWS, Azure or GCP with DNS service.
    • (Optional) Access to a Redis instance, for persistent storage for your rate limit counters.

    Note: for multiple clusters, it would make sense to do the installation via a tool like argocd. For other methods of addressing multiple clusters take a look at the kubectl docs

    "},{"location":"install-olm/#basic-installation","title":"Basic Installation","text":"

    This first step will install just Kuadrant at a given released version (post v1.x) in the kuadrant-system namespace and the Sail Operator. There will be no credentials/dns providers configured (This is the most basic setup but means TLSPolicy and DNSPolicy will not be able to be used).

    Start by creating the following kustomization.yaml in a directory locally. For the purpose of this doc, we will use: ~/kuadrant/ directory.

    export KUADRANT_DIR=~/kuadrant\nmkdir -p $KUADRANT_DIR/install\ntouch $KUADRANT_DIR/install/kustomization.yaml\n

    Add the below Kustomization CR to the kustomization.yaml created above:

    apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n  # choose the cluster preference that matches your scenario below. Set the version by adding ?ref=v1.0.1. Change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/standard?ref=v1.0.1 \n  # - https://github.com/Kuadrant/kuadrant-operator//config/install/openshift?ref=v1.0.1\n\npatches: # remove this subscription patch if you are installing a development version. It will then use the \"preview\" channel\n\n  - patch: |-\n      apiVersion: operators.coreos.com/v1alpha1\n      kind: Subscription\n      metadata:\n        name: kuadrant\n      spec:\n        source: kuadrant-operator-catalog\n        sourceNamespace: kuadrant-system\n        name: kuadrant-operator\n        channel: 'stable' #set to preview if not using a release (for example if using main)\n

    And execute the following to apply it to a cluster:

    # change the location depending on where you created the kustomization.yaml\nkubectl apply -k $KUADRANT_DIR/install\n
    "},{"location":"install-olm/#verify-the-operators-are-installed","title":"Verify the operators are installed:","text":"

    OLM should begin installing the dependencies for Kuadrant. To wait for them to be ready, run:

    kubectl -n kuadrant-system wait --timeout=160s --for=condition=Available deployments --all\n

    Note: you may see no matching resources found if the deployments are not yet present.

    Once OLM has finished installing the operators (this can take several minutes). You should see the following in the kuadrant-system namespace:

    kubectl get deployments -n kuadrant-system\n\n## Output\n# NAME                                    READY   UP-TO-DATE   AVAILABLE   AGE\n# authorino-operator                      1/1     1            1           83m\n# dns-operator-controller-manager         1/1     1            1           83m\n# kuadrant-console-plugin                 1/1     1            1           83m\n# kuadrant-operator-controller-manager    1/1     1            1           83m\n# limitador-operator-controller-manager   1/1     1            1           83m\n

    You can also view the subscription for information about the install:

    kubectl get subscription -n kuadrant-system -o=yaml\n
    "},{"location":"install-olm/#install-the-operand-components","title":"Install the operand components","text":"

    Kuadrant has 2 additional operand components that it manages: Authorino that provides data plane auth integration and Limitador that provides data plane rate limiting. To set these up, let's add a new kustomization.yaml in a new subdirectory. We will re-use this later for further configuration. We do this as a separate step as we want to have the operators installed first.

    Add the following to your local directory. For the purpose of this doc, we will use: $KUADRANT_DIR/configure/kustomization.yaml.

    mkdir -p $KUADRANT_DIR/configure\ntouch $KUADRANT_DIR/configure/kustomization.yaml\n

    Add the following to the new kustomization.yaml:

    apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n

    Lets apply this to your cluster:

    kubectl apply -k $KUADRANT_DIR/configure\n
    "},{"location":"install-olm/#verify-kuadrant-is-installed-and-ready","title":"Verify Kuadrant is installed and ready:","text":"
    kubectl get kuadrant kuadrant -n kuadrant-system -o=jsonpath='{.status.conditions[?(@.type==\"Ready\")].message}{\"\\n\"}'\n

    You should see the message kuadrant is ready.

    "},{"location":"install-olm/#verify-istio-is-configured-and-ready","title":"Verify Istio is configured and ready:","text":"
    kubectl wait istio/default --for=condition=ready=true\n

    At this point Kuadrant is installed and ready to be used as is Istio as the gateway provider. This means AuthPolicy and RateLimitPolicy can now be configured and used to protect any Gateways you create.

    "},{"location":"install-olm/#configure-dns-and-tls-integration","title":"Configure DNS and TLS integration","text":"

    In this section will build on the previous steps and expand the kustomization.yaml we created in $KUADRANT_DIR/configure.

    In order for cert-manager and the Kuadrant DNS operator to be able to access and manage DNS records and setup TLS certificates and provide external connectivity for your endpoints, you need to setup a credential for these components. To do this, we will use a Kubernetes secret via a kustomize secret generator. You can find other example overlays for each supported cloud provider under the configure directory.

    An example lets-encrypt certificate issuer is provided, but for more information on certificate issuers take a look at the cert-manager documentation.

    Lets modify our existing local kustomize overlay to setup these secrets and the cluster certificate issuer:

    First, you will need to set up the required .env file specified in the kustomization.yaml file in the same directory as your existing configure kustomization. Below is an example for AWS:

    touch $KUADRANT_DIR/configure/aws-credentials.env\n
    Add the following to your new file

    AWS_ACCESS_KEY_ID=xxx\nAWS_SECRET_ACCESS_KEY=xxx\nAWS_REGION=eu-west-1\n

    With this setup, lets update our configure kustomization to generate the needed secrets. We will also define a TLS ClusterIssuer (see below). The full kustomization.yaml file should look like:

    apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n  - cluster-issuer.yaml #(comment if you dont want to use it. The issuer yaml is defined below). Ensure you name the file correctly.\n\n\ngeneratorOptions:\n  disableNameSuffixHash: true\n  labels:\n    app.kubernetes.io/part-of: kuadrant\n    app.kubernetes.io/managed-by: kustomize\n\nsecretGenerator:\n\n  - name: aws-provider-credentials\n    namespace: cert-manager # assumes cert-manager namespace exists.\n    envs:\n      - aws-credentials.env # notice this matches the .env file above. You will need to setup this file locally\n    type: 'kuadrant.io/aws'\n  - name: aws-provider-credentials\n    namespace: gateway-system # this is the namespace where your gateway will be provisioned\n    envs:\n      - aws-credentials.env #notice this matches the .env file above. you need to set up this file locally first. \n    type: 'kuadrant.io/aws'\n

    Below is an example Lets-Encrypt Cluster Issuer that uses the aws credential we setup above. Create this in the same directory as the configure kustomization.yaml:

    touch $KUADRANT_DIR/configure/cluster-issuer.yaml\n

    Add the following to this new file:

    # example lets-encrypt cluster issuer that will work with the credentials we will add\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: lets-encrypt-aws\nspec:\n  acme:\n    privateKeySecretRef:\n      name: le-secret\n    server: https://acme-v02.api.letsencrypt.org/directory\n    solvers:\n\n      - dns01:\n          route53:\n            accessKeyIDSecretRef:\n              key: AWS_ACCESS_KEY_ID\n              name: aws-provider-credentials #notice this matches the name of the secret we created.\n            region: us-east-1 #override if needed\n            secretAccessKeySecretRef:\n              key: AWS_SECRET_ACCESS_KEY\n              name: aws-provider-credentials\n

    To apply our changes (note this doesn't need to be done in different steps, but is done so here to illustrate how you can build up your configuration of Kuadrant) execute:

    kubectl apply -k $KUADRANT_DIR/configure\n

    The cluster issuer should become ready:

    kubectl wait clusterissuer/lets-encrypt-aws --for=condition=ready=true\n

    We create two credentials. One for use with DNSPolicy in the gateway-system namespace and one for use by cert-manager in the cert-manager namespace. With these credentials in place and the cluster issuer configured. You are now ready to start using DNSPolicy and TLSPolicy to secure and connect your Gateways.

    "},{"location":"install-olm/#use-an-external-redis","title":"Use an External Redis","text":"

    To connect Limitador (the component responsible for rate limiting requests) to redis so that its counters are stored and can be shared with other limitador instances follow these steps:

    Again we will build on the kustomization we started. In the same way we did for the cloud provider credentials, we need to set up a redis-credentials.env file in the same directory as the kustomization.

    touch $KUADRANT_DIR/configure/redis-credentials.env\n

    Add the redis connection string to this file in the following format:

    URL=redis://xxxx\n

    Next we need to add a new secret generator to our existing configure file at $KUADRANT_DIR/configure/kustomization.yaml add it below the other secretGenerators

      - name: redis-credentials\n    namespace: kuadrant-system\n    envs:\n      - redis-credentials.env\n    type: 'kuadrant.io/redis'\n

    We also need to patch the existing Limitador resource. Add the following to the $KUADRANT_DIR/configure/kustomization.yaml

    patches:\n\n  - patch: |-\n      apiVersion: limitador.kuadrant.io/v1alpha1\n      kind: Limitador\n      metadata:\n        name: limitador\n        namespace: kuadrant-system\n      spec:\n        storage:\n          redis:\n            configSecretRef:\n              name: redis-credentials\n

    Your full kustomization.yaml will now be:

    apiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n  - cluster-issuer.yaml #(comment if you dont want to use it. The issuer yaml is defined below). Ensure you name the file correctly.\n\n\ngeneratorOptions:\n  disableNameSuffixHash: true\n  labels:\n    app.kubernetes.io/part-of: kuadrant\n    app.kubernetes.io/managed-by: kustomize\n\nsecretGenerator:\n\n  - name: aws-provider-credentials\n    namespace: cert-manager # assumes cert-manager namespace exists.\n    envs:\n      - aws-credentials.env # notice this matches the .env file above. You will need to setup this file locally\n    type: 'kuadrant.io/aws'\n  - name: aws-provider-credentials\n    namespace: gateway-system # this is the namespace where your gateway will be provisioned\n    envs:\n      - aws-credentials.env #notice this matches the .env file above. you need to set up this file locally first.\n    type: 'kuadrant.io/aws'\n  - name: redis-credentials\n    namespace: kuadrant-system\n    envs:\n      - redis-credentials.env\n    type: 'kuadrant.io/redis'\n\npatches:\n\n  - patch: |-\n      apiVersion: limitador.kuadrant.io/v1alpha1\n      kind: Limitador\n      metadata:\n        name: limitador\n        namespace: kuadrant-system\n      spec:\n        storage:\n          redis:\n            configSecretRef:\n              name: redis-credentials\n

    Re-Apply the configuration to setup the new secret and configuration:

    kubectl apply -k $KUADRANT_DIR/configure/\n

    Limitador is now configured to use the provided redis connection URL as a data store for rate limit counters. Limitador will become temporarily unavailable as it restarts.

    "},{"location":"install-olm/#validate","title":"Validate","text":"

    Validate Kuadrant is in a ready state as before:

    kubectl get kuadrant kuadrant -n kuadrant-system -o=wide\n\n# NAME       STATUS   AGE\n# kuadrant   Ready    61m\n
    "},{"location":"install-olm/#resilient-deployment-of-data-plane-components","title":"Resilient Deployment of data plane components","text":""},{"location":"install-olm/#limitador-topologyconstraints-poddisruptionbudget-and-resource-limits","title":"Limitador: TopologyConstraints, PodDisruptionBudget and Resource Limits","text":"

    To set limits, replicas and a PodDisruptionBudget for Limitador, you can add the following to the existing Limitador patch in your local $KUADRANT_DIR/configure/kustomization.yaml spec:

    pdb:\n  maxUnavailable: 1\nreplicas: 2\nresourceRequirements:\n    requests:\n      cpu: 10m\n      memory: 10Mi # set these based on your own needs.\n

    Re-apply the configuration. This will result in two instances of Limitador becoming available and a PodDisruptionBudget being set up:

    kubectl apply -k $KUADRANT_DIR/configure/\n

    For topology constraints, you will need to patch the limitador deployment directly:

    Add the below yaml to a limitador-topology-patch.yaml file under a $KUADRANT_DIR/configure/patches directory:

    mkdir -p $KUADRANT_DIR/configure/patches\ntouch $KUADRANT_DIR/configure/patches/limitador-topology-patch.yaml\n
    spec:\n  template:\n    spec:\n      topologySpreadConstraints:\n\n        - maxSkew: 1\n          topologyKey: kubernetes.io/hostname\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              limitador-resource: limitador\n        - maxSkew: 1\n          topologyKey: kubernetes.io/zone\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              limitador-resource: limitador\n

    Apply this to the existing limitador deployment

    kubectl patch deployment limitador-limitador -n kuadrant-system --patch-file $KUADRANT_DIR/configure/patches/limitador-topology-patch.yaml\n
    "},{"location":"install-olm/#authorino-topologyconstraints-poddisruptionbudget-and-resource-limits","title":"Authorino: TopologyConstraints, PodDisruptionBudget and Resource Limits","text":"

    To increase the number of replicas for Authorino add a new patch to the $KUADRANT_DIR/configure/kustomization.yaml

      - patch: |-\n      apiVersion: operator.authorino.kuadrant.io/v1beta1\n      kind: Authorino\n      metadata:\n        name: authorino\n        namespace: kuadrant-system\n      spec:\n        replicas: 2\n

    and re-apply the configuration:

    kubectl apply -k $KUADRANT_DIR/configure/\n

    To add resource limits and/or topology constraints to Authorino, you will need to patch the Authorino deployment directly: Add the below yaml to an authorino-topology-patch.yaml under the $KUADRANT_DIR/configure/patches directory:

    touch $KUADRANT_DIR/configure/patches/authorino-topology-patch.yaml\n
    spec:\n  template:\n    spec:\n      containers:\n\n        - name: authorino\n          resources:\n            requests:\n              cpu: 10m # set your own needed limits here\n              memory: 10Mi # set your own needed limits here\n      topologySpreadConstraints:\n        - maxSkew: 1\n          topologyKey: kubernetes.io/hostname\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              authorino-resource: authorino\n        - maxSkew: 1\n          topologyKey: kubernetes.io/zone\n          whenUnsatisfiable: ScheduleAnyway\n          labelSelector:\n            matchLabels:\n              authorino-resource: authorino\n

    Apply the patch:

    kubectl patch deployment authorino -n kuadrant-system --patch-file $KUADRANT_DIR/configure/patches/authorino-topology-patch.yaml\n

    Kuadrant is now installed and ready to use and the data plane components are configured to be distributed and resilient.

    For reference the full configure kustomization should look like:

    kind: Kustomization\nresources:\n\n  - https://github.com/Kuadrant/kuadrant-operator//config/install/configure/standard?ref=v1.0.1 #change this version as needed (see https://github.com/Kuadrant/kuadrant-operator/releases)\n  - cluster-issuer.yaml\ngeneratorOptions:\n  disableNameSuffixHash: true\n  labels:\n    app.kubernetes.io/part-of: kuadrant\n    app.kubernetes.io/managed-by: kustomize\n\nsecretGenerator:\n\n  - name: aws-provider-credentials\n    namespace: cert-manager # assumes cert-manager namespace exists.\n    envs:\n      - aws-credentials.env # notice this matches the .env file above. You will need to setup this file locally\n    type: 'kuadrant.io/aws'\n  - name: aws-provider-credentials\n    namespace: gateway-system # this is the namespace where your gateway will be provisioned\n    envs:\n      - aws-credentials.env #notice this matches the .env file above. you need to set up this file locally first.\n    type: 'kuadrant.io/aws'\n  - name: redis-credentials\n    namespace: kuadrant-system\n    envs:\n      - redis-credentials.env\n    type: 'kuadrant.io/redis'\n\npatches:\n\n  - patch: |-\n      apiVersion: limitador.kuadrant.io/v1alpha1\n      kind: Limitador\n      metadata:\n        name: limitador\n        namespace: kuadrant-system\n      spec:\n        pdb:\n          maxUnavailable: 1\n        replicas: 2\n        resourceRequirements:\n          requests:\n            cpu: 10m\n            memory: 10Mi # set these based on your own needs.\n        storage:\n          redis:\n            configSecretRef:\n              name: redis-credentials\n  - patch: |-\n      apiVersion: operator.authorino.kuadrant.io/v1beta1\n      kind: Authorino\n      metadata:\n        name: authorino\n        namespace: kuadrant-system\n      spec:\n        replicas: 2\n
    The configure directory should contain the following:

    configure/\n\u251c\u2500\u2500 aws-credentials.env\n\u251c\u2500\u2500 cluster-issuer.yaml\n\u251c\u2500\u2500 kustomization.yaml\n\u251c\u2500\u2500 patches\n\u2502\u00a0\u00a0 \u251c\u2500\u2500 authorino-topology-patch.yaml\n\u2502\u00a0\u00a0 \u2514\u2500\u2500 limitador-topology-patch.yaml\n\u2514\u2500\u2500 redis-credentials.env\n
    "},{"location":"install-olm/#set-up-observability-openshift-only","title":"Set up observability (OpenShift Only)","text":"

    Verify that user workload monitoring is enabled in your OpenShift cluster. If it is not enabled, check the OpenShift documentation for how to do this.

    kubectl get configmap cluster-monitoring-config -n openshift-monitoring -o jsonpath='{.data.config\\.yaml}'|grep enableUserWorkload\n# (expected output)\n# enableUserWorkload: true\n

    Install the gateway & Kuadrant metrics components and configuration, including Grafana.

    # change the version as needed\nkubectl apply -k https://github.com/Kuadrant/kuadrant-operator//config/install/configure/observability?ref=v1.0.1\n

    Configure the OpenShift thanos-query instance as a data source in Grafana.

    TOKEN=\"Bearer $(oc whoami -t)\"\nHOST=\"$(kubectl -n openshift-monitoring get route thanos-querier -o jsonpath='https://{.status.ingress[].host}')\"\necho \"TOKEN=$TOKEN\" > config/observability/openshift/grafana/datasource.env\necho \"HOST=$HOST\" >> config/observability/openshift/grafana/datasource.env\nkubectl apply -k config/observability/openshift/grafana\n

    Create the example dashboards in Grafana

    kubectl apply -k https://github.com/Kuadrant/kuadrant-operator/examples/dashboards?ref=v1.0.1\n

    Access the Grafana UI, using the default user/pass of root/secret. You should see the example dashboards in the 'monitoring' folder. For more information on the example dashboards, check out the documentation.

    kubectl -n monitoring get routes grafana-route -o jsonpath=\"https://{.status.ingress[].host}\"\n
    "},{"location":"install-olm/#next-steps","title":"Next Steps","text":"
    • Try out our Secure, protect, and connect guide
    "},{"location":"kuadrant-operator/doc/install/mtls-configuration/","title":"Configure mTLS between the Gateway and Kuadrant components","text":""},{"location":"kuadrant-operator/doc/install/mtls-configuration/#overview","title":"Overview","text":"

    This guide includes manual steps to enable mTLS between an Istio provided gateway and the Kuadrant components. If you use an AuthPolicy or RateLimitPolicy, there will be communication between the gateway and the respective Kuadrant components at request time. This communication happens between the Wasm plugin in Envoy proxy, and Authorino or Limitador. At the time of writing there is an RFC discussing how to add mTLS capabilities as a feature of the Kuadrant operator. If you are interested in having that feature or influencing how it is delivered, please engage on that pull request.

    Note

    This method currently only works if the Gateway is provided by Istio, with service mesh capabilities enabled across the cluster. For example, the Istio CNI agent is running on each node.

    "},{"location":"kuadrant-operator/doc/install/mtls-configuration/#prerequisites","title":"Prerequisites","text":"
    • You have installed Kuadrant in a Kubernetes cluster.
    • Additionally, you have at least 1 AuthPolicy or RateLimitPolicy attached to your Gateway or HTTPRoute.
    "},{"location":"kuadrant-operator/doc/install/mtls-configuration/#enabling-mtls","title":"Enabling mTLS","text":""},{"location":"kuadrant-operator/doc/install/mtls-configuration/#kuadrant-components","title":"Kuadrant components","text":"

    As the Kuadrant components (Authorino & Limitador) are already part of the service mesh in Istio, mTLS can be enabled after an Envoy proxy sidecar is deployed alongside them. To do this, apply the Istio sidecar label to both Deployment templates.

    kubectl -n kuadrant-system patch deployment authorino \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"true\"}}}}}'\n\nkubectl -n kuadrant-system patch deployment limitador-limitador \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"true\"}}}}}'\n

    You should see the number of containers in either pod increase from 1 to 2, as the istio-proxy is added to the pods. This change will force all traffic to those pods to go through the proxy. However, mTLS is not enabled yet.

    "},{"location":"kuadrant-operator/doc/install/mtls-configuration/#envoy-filter","title":"Envoy Filter","text":"

    The next step enables mTLS for traffic originating in the gateway (where the Wasm plugin executes), going to the Kuadrant components. This requires modifying the EnvoyFilters directly.

    Note

    Any changes to the EnvoyFilters may be reverted by the Kuadrant operator when related resources like Gateways, HTTPRoutes or policies are modified. It is recommended to automate the next step, for example via a job or GitOps controller, to ensure the changes persist.

    The EnvoyFilter resources will typically have a name prefix of kuadrant- in the same namespace as your Gateway. Add the snippet below to the spec.configPatches[].patch.value value in each EnvoyFilter.

            transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            '@type': type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n            common_tls_context:\n              tls_certificate_sds_secret_configs:\n\n              - name: default\n                sds_config:\n                  api_config_source:\n                    api_type: GRPC\n                    grpc_services:\n                    - envoy_grpc:\n                        cluster_name: sds-grpc\n              validation_context_sds_secret_config:\n                name: ROOTCA\n                sds_config:\n                  api_config_source:\n                    api_type: GRPC\n                    grpc_services:\n                    - envoy_grpc:\n                        cluster_name: sds-grpc\n

    The envoy.transport_sockets.tls transport socket name tells Envoy to use the built-in TLS transport socket, enabling TLS encryption. The @type specifies that the configuration follows the UpstreamTlsContext message from Envoy's TLS transport socket extension. This is used for client-side TLS settings. The tls_certificate_sds_secret_configs configures Envoy to obtain client certificates and private keys via the Secret Discovery Service (SDS) over GRPC. The validation_context_sds_secret_config configures Envoy to obtain the root CA certificates via SDS (over GRPC) to validate the server's certificate.

    "},{"location":"kuadrant-operator/doc/install/mtls-configuration/#istio-configuration","title":"Istio configuration","text":"

    The last step is to ensure Authorino and Limitador are configured to require and accept mTLS connections. In Istio, this is done by creating a PeerAuthentication resource where the mtls mode is set to STRICT. The below command will enable STRICT mode on all pods with Istio sidecar injection in the kuadrant-system namespace.

    kubectl apply -f - <<EOF\napiVersion: security.istio.io/v1\nkind: PeerAuthentication\nmetadata:\n  name: default\n  namespace: kuadrant-system\nspec:\n  mtls:\n    mode: STRICT\nEOF\n

    If you prefer to only enable mTLS for a specific component, you can modify just the EnvoyFilter and Deployment for that component. Then, when creating the PeerAuthentication resource, you can be more specific about what pods the mTLS mode applies to. For example, the following resource would enable STRICT mode just for the Limitador component.

    apiVersion: security.istio.io/v1\nkind: PeerAuthentication\nmetadata:\n  name: limitador-mtls\n  namespace: kuadrant-system\nspec:\n  selector:\n    matchLabels:\n      app: limitador\n  mtls:\n    mode: STRICT\n
    "},{"location":"kuadrant-operator/doc/install/mtls-configuration/#disabling-mtls","title":"Disabling mTLS","text":"

    To disable mTLS, remove the transport_socket changes from any EnvoyFilters. Then you can either set the mTLS mode to PERMISSIVE in the PeerAuthentication resource:

    kubectl patch peerauthentication default -n kuadrant-system --type='merge' -p '{\"spec\":{\"mtls\":{\"mode\":\"PERMISSIVE\"}}}'\n

    Or delete the resource:

    kubectl delete peerauthentication -n kuadrant-system default\n

    You don't have to remove the sidecar from the Kuadrant components, but it is safe to do so by removing the sidecar.istio.io/inject label:

    kubectl -n kuadrant-system patch deployment authorino \\\n  --type='json' \\\n  -p='[{\"op\": \"remove\", \"path\": \"/spec/template/metadata/labels/sidecar.istio.io~1inject\"}]'\n\nkubectl -n kuadrant-system patch deployment limitador-limitador \\\n  --type='json' \\\n  -p='[{\"op\": \"remove\", \"path\": \"/spec/template/metadata/labels/sidecar.istio.io~1inject\"}]'\n

    Or set the value to false:

    kubectl -n kuadrant-system patch deployment authorino \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"false\"}}}}}'\n\n\nkubectl -n kuadrant-system patch deployment limitador-limitador \\\n  -p '{\"spec\":{\"template\":{\"metadata\":{\"labels\":{\"sidecar.istio.io/inject\":\"false\"}}}}}'\n
    "},{"location":"kuadrant-operator/doc/observability/examples/","title":"Example Dashboards and Alerts","text":"

    Explore a variety of starting points for monitoring your Kuadrant installation with our examples folder. These dashboards and alerts are ready-to-use and easily customizable to fit your environment.

    There are some example dashboards uploaded to Grafana.com . You can use the ID's listed below to import these dashboards into Grafana:

    Name ID App Developer Dashboard 21538 Business User Dashboard 20981 Platform Engineer Dashboard 20982 DNS Operator Dashboard 22695"},{"location":"kuadrant-operator/doc/observability/examples/#dashboards","title":"Dashboards","text":""},{"location":"kuadrant-operator/doc/observability/examples/#importing-dashboards-into-grafana","title":"Importing Dashboards into Grafana","text":"

    For more details on how to import dashboards into Grafana, visit the import dashboards page.

    • UI Method:
      • JSON - Use the 'Import' feature in the Grafana UI to upload dashboard JSON files directly.
      • ID - Use the 'Import' feature in the Grafana UI to import via Grafana.com using a Dashboard ID.
    • ConfigMap Method: Automate dashboard provisioning by adding files to a ConfigMap, which should be mounted at /etc/grafana/provisioning/dashboards.

    Datasources are configured as template variables, automatically integrating with your existing data sources. Metrics for these dashboards are sourced from Prometheus. For more details on the metrics used, visit the metrics documentation page.

    "},{"location":"kuadrant-operator/doc/observability/examples/#alerts","title":"Alerts","text":""},{"location":"kuadrant-operator/doc/observability/examples/#setting-up-alerts-in-prometheus","title":"Setting Up Alerts in Prometheus","text":"

    You can integrate the example alerts into Prometheus as PrometheusRule resources. Feel free to adjust alert thresholds to suit your specific operational needs.

    Additionally, Service Level Objective (SLO) alerts generated with Sloth are included. A benefit of these alerts is the ability to integrate them with this SLO dashboard, which utilizes generated labels to comprehensively overview your SLOs.

    Further information on the metrics used for these alerts can be found on the metrics page.

    "},{"location":"kuadrant-operator/doc/observability/metrics/","title":"Metrics","text":"

    This is a reference page for some of the different metrics used in example dashboards and alerts. It is not an exhaustive list. The documentation for each component may provide more details on a per-component basis. Some of the metrics are sourced from components outside the Kuadrant project, for example, Envoy. The value of this reference is showing some of the more widely desired metrics, and how to join the metrics from different sources together in a meaningful way.

    "},{"location":"kuadrant-operator/doc/observability/metrics/#metrics-sources","title":"Metrics sources","text":"
    • Kuadrant components
    • Istio
    • Envoy
    • Kube State Metrics
    • Gateway API State Metrics
    • Kubernetes metrics
    "},{"location":"kuadrant-operator/doc/observability/metrics/#resource-usage-metrics","title":"Resource usage metrics","text":"

    Resource metrics, like CPU, memory and disk usage, primarily come from the Kubernetes metrics components. These include container_cpu_usage_seconds_total, container_memory_working_set_bytes and kubelet_volume_stats_used_bytes. A stable list of metrics is maintained in the Kubernetes repository. These low-level metrics typically have a set of recording rules that aggregate values by labels and time ranges. For example, node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate or namespace_workload_pod:kube_pod_owner:relabel. If you have deployed the kube-prometheus project, you should have the majority of these metrics being scraped.

    "},{"location":"kuadrant-operator/doc/observability/metrics/#networking-metrics","title":"Networking metrics","text":"

    Low-level networking metrics like container_network_receive_bytes_total are also available from the Kubernetes metrics components. HTTP & GRPC traffic metrics with higher level labels are available from Istio. One of the main metrics would be istio_requests_total, which is a counter incremented for every request handled by an Istio proxy. Latency metrics are available via the istio_request_duration_milliseconds metric, with buckets for varying response times.

    Some example dashboards have panels that make use of the request URL path. The path is not added as a label to Istio metrics by default, as it has the potential to increase metric cardinality, and thus storage requirements. If you want to make use of the path in your queries or visualisations, you can enable the request path metric via the Telemetry resource in Istio:

    apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n  name: namespace-metrics\n  namespace: gateway-system\nspec:\n  metrics:\n\n  - providers:\n    - name: prometheus\n    overrides:\n    - match:\n        metric: REQUEST_COUNT\n      tagOverrides:\n        request_url_path:\n          value: \"request.url_path\"\n    - match:      \n        metric: REQUEST_DURATION\n      tagOverrides:\n        request_url_path:\n          value: \"request.url_path\"\n
    "},{"location":"kuadrant-operator/doc/observability/metrics/#state-metrics","title":"State metrics","text":"

    The kube-state-metrics project exposes the state of various Kubernetes resources as metrics and labels. For example, the ready status of a Pod is available as kube_pod_status_ready, with labels for the pod name and namespace. This can be useful for linking lower level container metrics back to a meaningful resource in the Kubernetes world.

    "},{"location":"kuadrant-operator/doc/observability/metrics/#joining-metrics","title":"Joining metrics","text":"

    Metric queries can be as simple as just the name of the metric, or can be complex with joining & grouping. A lot of the time it can be useful to tie back low level metrics to more meaningful Kubernetes resources. For example, if the memory usage is maxed out on a container and that container is constantly being OOMKilled, it can be useful to get the Deployment and Namespace of that container for debugging. Prometheus query language (or promql) allows vector matching or results (sometimes called joining).

    When using Gateway API and Kuadrant resources like HTTPRoute and RateLimitPolicy, the state metrics can be joined to Istio metrics to give a meaningful result set. Here's an example that queries the number of requests per second, and includes the name of the HTTPRoute that the traffic is for.

    sum(\n    rate(\n        istio_requests_total{}[5m]\n    )\n) by (destination_service_name)\n\n\n* on(destination_service_name) group_right \n    label_replace(gatewayapi_httproute_labels{}, \\\"destination_service_name\\\", \\\"$1\\\",\\\"service\\\", \\\"(.+)\\\")\n

    Breaking this query down, there are 2 parts. The first part is getting the rate of requests hitting the Istio gateway, aggregated to 5m intervals:

    sum(\n    rate(\n        istio_requests_total{}[5m]\n    )\n) by (destination_service_name)\n

    The result set here will include a label for the destination service name (i.e. the Service in Kubernetes). This label is key to looking up the HTTPRoute this traffic belongs to.

    The 2nd part of the query uses the gatewayapi_httproute_labels metric and the label_replace function. The gatewayapi_httproute_labels metric gives a list of all httproutes, including any labels on them. The HTTPRoute in this example has a label called 'service', set to be the same as the Istio service name. This allows us to join the 2 result sets. However, because the label doesn't match exactly (destination_service_name and service), we can replace the label so that it does match. That's what the label_replace does.

        label_replace(gatewayapi_httproute_labels{}, \\\"destination_service_name\\\", \\\"$1\\\",\\\"service\\\", \\\"(.+)\\\")\n

    The 2 parts are joined together using vector matching.

    * on(destination_service_name) group_right \n
    • * is the binary operator i.e. multiplication (gives join like behaviour)
    • on() specifies which labels to \"join\" the 2 results with
    • group_right enables a one to many matching.

    See the Prometheus documentation for further details on matching.

    "},{"location":"kuadrant-operator/doc/observability/tracing/","title":"Enabling tracing with a central collector","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#introduction","title":"Introduction","text":"

    This guide outlines the steps to enable tracing in Istio and Kuadrant components (Authorino and Limitador), directing traces to a central collector for improved observability and troubleshooting. We'll also explore a typical troubleshooting flow using traces and logs.

    "},{"location":"kuadrant-operator/doc/observability/tracing/#prerequisites","title":"Prerequisites","text":"
    • A Kubernetes cluster with Istio and Kuadrant installed.
    • A trace collector (e.g., Jaeger or Tempo) configured to support OpenTelemetry (OTel).
    "},{"location":"kuadrant-operator/doc/observability/tracing/#configuration-steps","title":"Configuration Steps","text":""},{"location":"kuadrant-operator/doc/observability/tracing/#istio-tracing-configuration","title":"Istio Tracing Configuration","text":"

    Enable tracing in Istio by using the Telemetry API. Depending on your method for installing Istio, you will need to configure a tracing extensionProvider in your MeshConfig, Istio or IstioOperator resource as well. Here is an example Telemetry and Istio config to sample 100% of requests, if using the Istio Sail Operator.

    apiVersion: telemetry.istio.io/v1alpha1\nkind: Telemetry\nmetadata:\n  name: mesh-default\n  namespace: gateway-system\nspec:\n  tracing:\n\n  - providers:\n    - name: tempo-otlp\n    randomSamplingPercentage: 100\n---\napiVersion: operator.istio.io/v1alpha1\nkind: Istio\nmetadata:\n  name: default\nspec:\n  namespace: gateway-system\n  values:\n    meshConfig:\n      defaultConfig:\n        tracing: {}\n      enableTracing: true\n      extensionProviders:\n      - name: tempo-otlp\n        opentelemetry:\n          port: 4317\n          service: tempo.tempo.svc.cluster.local\n

    Important:

    The OpenTelemetry collector protocol should be explicitly set in the service port name or appProtocol fields as per the Istio documentation. For example, when using gRPC, the port name should begin with grpc- or the appProtocol should be grpc.

    "},{"location":"kuadrant-operator/doc/observability/tracing/#kuadrant-tracing-configuration","title":"Kuadrant Tracing Configuration","text":"

    The Authorino and Limitador components have request tracing capabilities. Here is an example configuration to enable and send traces to a central collector. Ensure the collector is the same one that Istio is sending traces so that they can be correlated later.

    apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  tracing:\n    endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n    insecure: true\n---\napiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador\nspec:\n  tracing:\n    endpoint: rpc://tempo.tempo.svc.cluster.local:4317\n

    Once the changes are applied, the authorino and limitador components will be redeployed with tracing enabled.

    Note:

    There are plans to consolidate the tracing configuration to a single location i.e. the Kuadrant CR. This will eventually eliminate the need to configure tracing in both the Authorino and Limitador CRs.

    Important:

    Currently, trace IDs do not propagate to wasm modules in Istio/Envoy, affecting trace continuity in Limitador. This means that requests passed to limitador will not have the relevant 'parent' trace ID in their trace information. If however the trace initiation point is outside of Envoy/Istio, the 'parent' trace ID will be available to limitador and included in traces passed to the collector. This has an impact on correlating traces from limitador with traces from authorino, the gateway and any other components in the path of requests.

    "},{"location":"kuadrant-operator/doc/observability/tracing/#troubleshooting-flow-using-traces-and-logs","title":"Troubleshooting Flow Using Traces and Logs","text":"

    Using a tracing interface like the Jaeger UI or Grafana, you can search for trace information by the trace ID. You may get the trace ID from logs, or from a header in a sample request you want to troubleshoot. You can also search for recent traces, filtering by the service you want to focus on.

    Here is an example trace in the Grafana UI showing the total request time from the gateway (Istio), the time to check the current rate limit count (and update it) in limitador and the time to check auth in Authorino:

    In limitador, it is possible to enable request logging with trace IDs to get more information on requests. This requires the log level to be increased to at least debug, so the verbosity must be set to 3 or higher in the Limitador CR. For example:

    apiVersion: limitador.kuadrant.io/v1alpha1\nkind: Limitador\nmetadata:\n  name: limitador\nspec:\n  verbosity: 3\n

    A log entry will look something like this, with the traceparent field holding the trace ID:

    \"Request received: Request { metadata: MetadataMap { headers: {\"te\": \"trailers\", \"grpc-timeout\": \"5000m\", \"content-type\": \"application/grpc\", \"traceparent\": \"00-4a2a933a23df267aed612f4694b32141-00f067aa0ba902b7-01\", \"x-envoy-internal\": \"true\", \"x-envoy-expected-rq-timeout-ms\": \"5000\"} }, message: RateLimitRequest { domain: \"default/toystore\", descriptors: [RateLimitDescriptor { entries: [Entry { key: \"limit.general_user__f5646550\", value: \"1\" }, Entry { key: \"metadata.filter_metadata.envoy\\\\.filters\\\\.http\\\\.ext_authz.identity.userid\", value: \"alice\" }], limit: None }], hits_addend: 1 }, extensions: Extensions }\"\n

    If you centrally aggregate logs using something like promtail and loki, you can jump between trace information and the relevant logs for that service:

    Using a combination of tracing and logs, you can visualise and troubleshoot request timing issues and drill down to specific services. This method becomes even more powerful when combined with metrics and dashboards to get a more complete picture of your users' traffic.

    "},{"location":"kuadrant-operator/doc/overviews/auth/","title":"Kuadrant Auth","text":"

    A Kuadrant AuthPolicy custom resource:

    1. Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to enforce auth.
    2. Supports targeting subsets (sections) of a network resource to apply the auth rules to.
    3. Abstracts the details of the underlying external authorization protocol and configuration resources, that have a much broader remit and surface area.
    4. Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
    "},{"location":"kuadrant-operator/doc/overviews/auth/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/overviews/auth/#envoys-external-authorization-protocol","title":"Envoy's External Authorization Protocol","text":"

    Kuadrant's Auth implementation relies on the Envoy's External Authorization protocol. The workflow per request goes:

    1. On incoming request, the gateway checks the matching rules for enforcing the auth rules, as stated in the AuthPolicy custom resources and targeted Gateway API networking objects
    2. If the request matches, the gateway sends one CheckRequest to the external auth service (\"Authorino\").
    3. The external auth service responds with a CheckResponse back to the gateway with either an OK or DENIED response code.

    An AuthPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external auth service.

    "},{"location":"kuadrant-operator/doc/overviews/auth/#the-authpolicy-custom-resource","title":"The AuthPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/auth/#overview","title":"Overview","text":"

    The AuthPolicy spec includes the following parts:

    • A reference to an existing Gateway API resource (spec.targetRef)
    • Authentication/authorization scheme (spec.rules)
    • Top-level additional conditions (spec.when)
    • List of named patterns (spec.patterns)

    The auth scheme specifies rules for:

    • Authentication (spec.rules.authentication)
    • External auth metadata fetching (spec.rules.metadata)
    • Authorization (spec.rules.authorization)
    • Custom response items (spec.rules.response)
    • Callbacks (spec.rules.callbacks)

    Each auth rule can declare specific when conditions for the rule to apply.

    The auth scheme (rules), as well as conditions and named patterns, can be declared at the top level of the spec (with the semantics of defaults) or alternatively within explicit defaults or overrides blocks.

    Check out the API reference for a full specification of the AuthPolicy CRD.

    "},{"location":"kuadrant-operator/doc/overviews/auth/#using-the-authpolicy","title":"Using the AuthPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/auth/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"

    When an AuthPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs field of the HTTPRoute.

    The targeted HTTPRoute's rules and/or hostnames to which the policy must be enforced can be filtered to specific subsets.

    Target a HTTPRoute by setting the spec.targetRef field of the AuthPolicy as follows:

    apiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: my-route-auth\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: <HTTPRoute Name>\n  rules: { \u2026 }\n
    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510             \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502             \u2502   (App namespace)  \u2502\n\u2502                   \u2502             \u2502                    \u2502\n\u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u2502  parentRefs \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510     \u2502\n\u2502  \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502     \u2502\n\u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518     \u2502\n\u2502                   \u2502             \u2502        \u25b2           \u2502\n\u2502                   \u2502             \u2502        \u2502           \u2502\n\u2502                   \u2502             \u2502        \u2502           \u2502\n\u2502                   \u2502             \u2502        \u2502 targetRef \u2502\n\u2502                   \u2502             \u2502        \u2502           \u2502\n\u2502                   \u2502             \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502\n\u2502                   \u2502             \u2502  \u2502 AuthPolicy \u2502    \u2502\n\u2502                   \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502\n\u2502                   \u2502             \u2502                    
\u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
    "},{"location":"kuadrant-operator/doc/overviews/auth/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"

    If an AuthPolicy targets a route defined for *.com and another AuthPolicy targets another route for api.com, the Kuadrant control plane will not merge these two AuthPolicies. Rather, it will mimic the behavior of gateway implementation by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and auth rules.

    E.g., a request coming for api.com will be protected according to the rules from the AuthPolicy that targets the route for api.com; while a request for other.com will be protected with the rules from the AuthPolicy targeting the route for *.com.

    Example with 3 AuthPolicies and 3 HTTPRoutes:

    • AuthPolicy A \u2192 HTTPRoute A (a.toystore.com)
    • AuthPolicy B \u2192 HTTPRoute B (b.toystore.com)
    • AuthPolicy W \u2192 HTTPRoute W (*.toystore.com)

    Expected behavior:

    • Request to a.toystore.com \u2192 AuthPolicy A will be enforced
    • Request to b.toystore.com \u2192 AuthPolicy B will be enforced
    • Request to other.toystore.com \u2192 AuthPolicy W will be enforced
    "},{"location":"kuadrant-operator/doc/overviews/auth/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

    An AuthPolicy that targets a Gateway can declare a block of defaults (spec.defaults) or a block of overrides (spec.overrides). As a standard, gateway policies that specify neither defaults nor overrides act as defaults.

    When declaring defaults, an AuthPolicy which targets a Gateway will be enforced to all HTTP traffic hitting the gateway, unless a more specific AuthPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referencing the gateway as parent will be automatically covered by the default AuthPolicy, as well as changes in the existing HTTPRoutes.

    Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempts, such as by setting preemptive \"deny-all\" policies on hostnames and hostname wildcards.

    Conversely, a gateway policy that specifies overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.

    Target a Gateway by setting the spec.targetRef field of the AuthPolicy as follows:

    apiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: my-gw-auth\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n  defaults: # alternatively: `overrides`\n    rules: { \u2026 }\n
    \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510             \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (Infra namespace) \u2502             \u2502   (App namespace)  \u2502\n\u2502                   \u2502             \u2502                    \u2502\n\u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u2502  parentRefs \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510     \u2502\n\u2502  \u2502 Gateway \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u253c\u2500\u2500\u2524 HTTPRoute \u2502     \u2502\n\u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518     \u2502\n\u2502       \u25b2           \u2502             \u2502        \u25b2           \u2502\n\u2502       \u2502           \u2502             \u2502        \u2502           \u2502\n\u2502       \u2502           \u2502             \u2502        \u2502           \u2502\n\u2502       \u2502 targetRef \u2502             \u2502        \u2502 targetRef \u2502\n\u2502       \u2502           \u2502             \u2502        \u2502           \u2502\n\u2502 \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502             \u2502  \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510    \u2502\n\u2502 \u2502 AuthPolicy \u2502    \u2502             \u2502  \u2502 AuthPolicy \u2502    \u2502\n\u2502 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    \u2502             \u2502  \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518    
\u2502\n\u2502                   \u2502             \u2502                    \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518             \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
    "},{"location":"kuadrant-operator/doc/overviews/auth/#overlapping-gateway-and-httproute-authpolicies","title":"Overlapping Gateway and HTTPRoute AuthPolicies","text":"

    Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.

    Gateway AuthPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute AuthPolicy exists, in which case the HTTPRoute AuthPolicy prevails.

    Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoute and 2 Gateways without AuthPolicies attached):

    • AuthPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
    • AuthPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
    • AuthPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
    • AuthPolicy G (defaults) \u2192 Gateway G (*.com)

    Expected behavior:

    • Request to a.toystore.com \u2192 AuthPolicy A will be enforced
    • Request to b.toystore.com \u2192 AuthPolicy B will be enforced
    • Request to other.toystore.com \u2192 AuthPolicy W will be enforced
    • Request to other.com (suppose a route exists) \u2192 AuthPolicy G will be enforced
    • Request to yet-another.net (suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced

    Gateway AuthPolicies that declare overrides protect all traffic routed through the gateway, regardless of existence of any more specific HTTPRoute AuthPolicy.

    Example with 4 AuthPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoute and 2 Gateways without AuthPolicies attached):

    • AuthPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
    • AuthPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
    • AuthPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
    • AuthPolicy G (overrides) \u2192 Gateway G (*.com)

    Expected behavior:

    • Request to a.toystore.com \u2192 AuthPolicy G will be enforced
    • Request to b.toystore.com \u2192 AuthPolicy G will be enforced
    • Request to other.toystore.com \u2192 AuthPolicy G will be enforced
    • Request to other.com (suppose a route exists) \u2192 AuthPolicy G will be enforced
    • Request to yet-another.net (suppose a route and gateway exist) \u2192 No AuthPolicy will be enforced
    "},{"location":"kuadrant-operator/doc/overviews/auth/#when-conditions","title":"when conditions","text":"

    when conditions can be used to scope an AuthPolicy or auth rule within an AuthPolicy (i.e. to filter the traffic to which a policy or policy rule applies) without any coupling to the underlying network topology.

    Use when conditions to conditionally activate policies and policy rules based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames and spec.rules.matches fields, or in general in AuthPolicies that target a Gateway.

    when conditions in an AuthPolicy are compatible with Authorino conditions, thus supporting complex boolean expressions with AND and OR operators, as well as grouping.

    The selectors within the when conditions of an AuthPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.

    Authorino JSON path string modifiers can also be applied to the selectors within the when conditions of an AuthPolicy.

    "},{"location":"kuadrant-operator/doc/overviews/auth/#examples","title":"Examples","text":"

    Check out the following user guides for examples of protecting services with Kuadrant:

    • Enforcing authentication & authorization with Kuadrant AuthPolicy, for app developers and platform engineers
    • Authenticated Rate Limiting for Application Developers
    • Authenticated Rate Limiting with JWTs and Kubernetes RBAC
    "},{"location":"kuadrant-operator/doc/overviews/auth/#known-limitations","title":"Known limitations","text":"
    • One HTTPRoute can only be targeted by one AuthPolicy.
    • One Gateway can only be targeted by one AuthPolicy.
    • AuthPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the AuthPolicy.
    • 2+ AuthPolicies cannot target network resources that define/inherit the same exact hostname.
    "},{"location":"kuadrant-operator/doc/overviews/auth/#limitation-multiple-network-resources-with-identical-hostnames","title":"Limitation: Multiple network resources with identical hostnames","text":"

    Kuadrant currently does not support multiple AuthPolicies simultaneously targeting network resources that declare identical hostnames. This includes multiple HTTPRoutes that specify the same hostnames in the spec.hostnames field, as well as HTTPRoutes that specify a hostname that is identical to a hostname specified in a listener of one of the route's parent gateways or HTTPRoutes that don't specify any hostname at all thus inheriting the hostnames from the parent gateways. In any of these cases, a maximum of one AuthPolicy targeting any of those resources that specify identical hostnames is allowed.

    Moreover, having multiple resources that declare identical hostnames may lead to unexpected behavior and therefore should be avoided.

    This limitation is rooted at the underlying components configured by Kuadrant for the implementation of its policies and the lack of information in the data plane regarding the exact route that is honored by the API gateway at each specific request, in cases of conflicting hostnames.

    To exemplify one way this limitation can impact deployments, consider the following topology:

                     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                 \u2502   Gateway    \u2502\n                 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n          \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners:   \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n          \u2502      \u2502 - host: *.io \u2502       \u2502\n          \u2502      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518       \u2502\n          \u2502                             \u2502\n          \u2502                             \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502     HTTPRoute     \u2502        \u2502     HTTPRoute     \u2502\n\u2502     (route-a)     \u2502        \u2502     (route-b)     \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524        \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames:        \u2502        \u2502 hostnames:        \u2502\n\u2502 - app.io          \u2502        \u2502 - app.io          \u2502\n\u2502 rules:            \u2502        \u2502 rules:            \u2502\n\u2502 - matches:        \u2502        \u2502 - matches:        \u2502\n\u2502   - path:         \u2502        \u2502   - path:         \u2502\n\u2502       value: /foo \u2502        \u2502       value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518        
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n          \u25b2                            \u25b2\n          \u2502                            \u2502\n    \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510               \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n    \u2502 AuthPolicy \u2502               \u2502 AuthPolicy \u2502\n    \u2502 (policy-1) \u2502               \u2502 (policy-2) \u2502\n    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518               \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

    In the example above, with the policy-1 resource created before policy-2, policy-1 will be enforced on all requests to app.io/foo while policy-2 will be rejected. I.e. app.io/bar will not be secured. In fact, the status conditions of policy-2 shall reflect Enforced=false with message \"AuthPolicy has encountered some issues: AuthScheme is not ready yet\".

    Notice the enforcement of policy-1 and no enforcement of policy-2 is the opposite behavior as the analogous problem with the Kuadrant RateLimitPolicy.

    A slightly different way the limitation applies is when two or more routes of a gateway declare the exact same hostname and a gateway policy is defined with expectation to set default rules for the cases not covered by more specific policies. E.g.:

                                        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                         \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524 AuthPolicy \u2502\n                         \u2502          \u2502 (policy-2) \u2502\n                         \u25bc          \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                 \u2502   Gateway    \u2502\n                 \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n          \u250c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502 listeners:   \u2502\u25c4\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n          \u2502      \u2502 - host: *.io \u2502       \u2502\n          \u2502      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518       \u2502\n          \u2502                             \u2502\n          \u2502                             \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502     HTTPRoute     \u2502        \u2502     HTTPRoute     \u2502\n\u2502     (route-a)     \u2502        \u2502     (route-b)     \u2502\n\u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524        \u251c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2524\n\u2502 hostnames:        \u2502        \u2502 hostnames:        \u2502\n\u2502 - app.io          \u2502        \u2502 - app.io          \u2502\n\u2502 rules:            \u2502        \u2502 
rules:            \u2502\n\u2502 - matches:        \u2502        \u2502 - matches:        \u2502\n\u2502   - path:         \u2502        \u2502   - path:         \u2502\n\u2502       value: /foo \u2502        \u2502       value: /bar \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518        \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n          \u25b2\n          \u2502\n    \u250c\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n    \u2502 AuthPolicy \u2502\n    \u2502 (policy-1) \u2502\n    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

    Once again, requests to app.io/foo will be protected under AuthPolicy policy-1, while requests to app.io/bar will not be protected under any policy at all, unlike expected gateway policy policy-2 enforced as default. Both policies will report status condition as Enforced nonetheless.

    To avoid these problems, use different hostnames in each route.

    "},{"location":"kuadrant-operator/doc/overviews/auth/#implementation-details","title":"Implementation details","text":"

    Under the hood, for each AuthPolicy, Kuadrant creates an Istio AuthorizationPolicy and an Authorino AuthConfig custom resources.

    Only requests that match the rules in the Istio AuthorizationPolicy cause an authorization request to be sent to the external authorization service (\"Authorino\"), i.e., only requests directed to the HTTPRouteRules targeted by the AuthPolicy (directly or indirectly), according to the declared top-level route selectors (if present), or all requests for which a matching HTTPRouteRule exists (otherwise).

    Authorino looks up the auth scheme (AuthConfig custom resource) to enforce using the provided hostname of the original request as key. It then checks again if the request matches at least one of the selected HTTPRouteRules, in which case it enforces the auth scheme.

    Exception to the rule Due to limitations imposed by the Istio `AuthorizationPolicy`, there are a few patterns of HTTPRouteRules that cannot be translated to filters for the external authorization request. Therefore, the following patterns used in HTTPRouteMatches of top-level route selectors of an AuthPolicy will not be included in the Istio AuthorizationPolicy rules that trigger the check request with Authorino: `PathMatchRegularExpression`, `HeaderMatchRegularExpression`, and `HTTPQueryParamMatch`. As a consequence to the above, requests that do not match these rules and otherwise would not be checked with Authorino will result in a request to the external authorization service. Authorino nonetheless will still verify those patterns and ensure the auth scheme is enforced only when it matches a selected HTTPRouteRule. Users of Kuadrant may observe an unnecessary call to the authorization service in those cases where the request is out of the scope of the AuthPolicy and therefore always authorized."},{"location":"kuadrant-operator/doc/overviews/auth/#internal-custom-resources-and-namespaces","title":"Internal custom resources and namespaces","text":"

    While the Istio AuthorizationPolicy needs to be created in the same namespace as the gateway workload, the Authorino AuthConfig is created in the namespace of the AuthPolicy itself. This allows to simplify references such as to Kubernetes Secrets referred in the AuthPolicy, as well as the RBAC to support the architecture.

    "},{"location":"kuadrant-operator/doc/overviews/dns/","title":"Kuadrant DNS","text":"

    A Kuadrant DNSPolicy custom resource:

    Targets Gateway API networking resources Gateways to provide dns management by managing the lifecycle of dns records in external dns providers such as AWS Route53 and Google DNS.

    "},{"location":"kuadrant-operator/doc/overviews/dns/#how-it-works","title":"How it works","text":"

    A DNSPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external DNS service. The needed dns names are gathered from the listener definitions and the IPAddresses | CNAME hosts are gathered from the status block of the gateway resource.

    "},{"location":"kuadrant-operator/doc/overviews/dns/#the-dnspolicy-custom-resource","title":"The DNSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/dns/#overview","title":"Overview","text":"

    The DNSPolicy spec includes the following parts:

    • A reference to an existing Gateway API resource (spec.targetRef)
    • LoadBalancing specification (spec.loadBalancing)
    • HealthCheck specification (spec.healthCheck)

    Check out the API reference for a full specification of the DNSPolicy CRD.

    "},{"location":"kuadrant-operator/doc/overviews/dns/#using-the-dnspolicy","title":"Using the DNSPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/dns/#dns-provider-setup","title":"DNS Provider Setup","text":"

    A DNSPolicy acts against a target Gateway by processing its listeners for hostnames that it can create dns records for. In order for it to do this, it must know about the dns provider. This is done through the creation of dns provider secrets containing the credentials and configuration for the dns provider account.

    If for example a Gateway is created with a listener with a hostname of echo.apps.hcpapps.net:

    apiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: my-gw\nspec:\n  listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All\n      name: api\n      hostname: echo.apps.hcpapps.net\n      port: 80\n      protocol: HTTP\n

    In order for the DNSPolicy to act upon that listener, a DNS provider Secret must exist for that hostname's domain.

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: my-aws-credentials\n  namespace: <Gateway Namespace>\ndata:\n  AWS_ACCESS_KEY_ID: <AWS_ACCESS_KEY_ID>\n  AWS_REGION: <AWS_REGION>\n  AWS_SECRET_ACCESS_KEY: <AWS_SECRET_ACCESS_KEY>\ntype: kuadrant.io/aws\n

    By default, Kuadrant will list the available zones and find the matching zone based on the listener host in the gateway listener. If it finds more than one matching zone for a given listener host, it will not update any of those zones. When providing a credential you should limit that credential down to just have write access to the zones you want Kuadrant to manage. Below is an example of an AWS policy that grants this kind of scoped access:

    {\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"VisualEditor0\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListTagsForResources\",\n                \"route53:GetHealthCheckLastFailureReason\",\n                \"route53:GetHealthCheckStatus\",\n                \"route53:GetChange\",\n                \"route53:GetHostedZone\",\n                \"route53:ChangeResourceRecordSets\",\n                \"route53:ListResourceRecordSets\",\n                \"route53:GetHealthCheck\",\n                \"route53:UpdateHostedZoneComment\",\n                \"route53:UpdateHealthCheck\",\n                \"route53:CreateHealthCheck\",\n                \"route53:DeleteHealthCheck\",\n                \"route53:ListTagsForResource\",\n                \"route53:ListHealthChecks\",\n                \"route53:GetGeoLocation\",\n                \"route53:ListGeoLocations\",\n                \"route53:ListHostedZonesByName\",\n                \"route53:GetHealthCheckCount\"\n            ],\n            \"Resource\": [\n                \"arn:aws:route53:::hostedzone/Z08187901Y93585DDGM6K\",\n                \"arn:aws:route53:::healthcheck/*\",\n                \"arn:aws:route53:::change/*\"\n            ]\n        },\n        {\n            \"Sid\": \"VisualEditor1\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListHostedZones\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n
    "},{"location":"kuadrant-operator/doc/overviews/dns/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

    When a DNSPolicy targets a Gateway, the policy will be enforced on all gateway listeners.

    Target a Gateway by setting the spec.targetRef field of the DNSPolicy as follows:

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: <DNSPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n
    "},{"location":"kuadrant-operator/doc/overviews/dns/#targeting-a-specific-listener-of-a-gateway","title":"Targeting a specific Listener of a gateway","text":"

    A DNSPolicy can target a specific listener in a gateway using the sectionName property of the targetRef configuration. When you set the sectionName, the DNSPolicy will only affect that listener and no others. If you also have another DNSPolicy targeting the entire gateway, the more specific policy targeting the listener will be the policy that is applied.

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: <DNSPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n    sectionName: <myListenerName>\n
    "},{"location":"kuadrant-operator/doc/overviews/dns/#dnsrecord-resource","title":"DNSRecord Resource","text":"

    The DNSPolicy will create a DNSRecord resource for each listener hostname. The DNSPolicy resource uses the status of the Gateway to determine what dns records need to be created based on the clusters it has been placed onto.

    Given the following multi cluster gateway status:

    status:\n  addresses:\n\n    - type: kuadrant.io/MultiClusterIPAddress\n      value: kind-mgc-workload-1/172.31.201.1\n    - type: kuadrant.io/MultiClusterIPAddress\n      value: kind-mgc-workload-2/172.31.202.1\n  listeners:\n    - attachedRoutes: 1\n      conditions: []\n      name: kind-mgc-workload-1.api\n      supportedKinds: []\n    - attachedRoutes: 1\n      conditions: []\n      name: kind-mgc-workload-2.api\n      supportedKinds: []\n

    A DNSPolicy targeting this gateway would create an appropriate DNSRecord based on the routing strategy selected.

    "},{"location":"kuadrant-operator/doc/overviews/dns/#loadbalanced","title":"loadbalanced","text":"
    apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n  name: echo.apps.hcpapps.net\n  namespace: <Gateway Namespace>\nspec:\n  endpoints:\n\n    - dnsName: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n      recordTTL: 60\n      recordType: A\n      targets:\n        - 172.31.202.1\n    - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n      providerSpecific:\n        - name: weight\n          value: \"120\"\n      recordTTL: 60\n      recordType: CNAME\n      setIdentifier: 24osuu.lb-2903yb.echo.apps.hcpapps.net\n      targets:\n        - 24osuu.lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: default.lb-2903yb.echo.apps.hcpapps.net\n      providerSpecific:\n        - name: weight\n          value: \"120\"\n      recordTTL: 60\n      recordType: CNAME\n      setIdentifier: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n      targets:\n        - lrnse3.lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: echo.apps.hcpapps.net\n      recordTTL: 300\n      recordType: CNAME\n      targets:\n        - lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: lb-2903yb.echo.apps.hcpapps.net\n      providerSpecific:\n        - name: geo-country-code\n          value: '*'\n      recordTTL: 300\n      recordType: CNAME\n      setIdentifier: default\n      targets:\n        - default.lb-2903yb.echo.apps.hcpapps.net\n    - dnsName: lrnse3.lb-2903yb.echo.apps.hcpapps.net\n      recordTTL: 60\n      recordType: A\n      targets:\n        - 172.31.201.1\n  providerRefs:\n    - name: my-aws-credentials\n

    After DNSRecord reconciliation the listener hostname should be resolvable through dns:

    dig echo.apps.hcpapps.net +short\nlb-2903yb.echo.apps.hcpapps.net.\ndefault.lb-2903yb.echo.apps.hcpapps.net.\nlrnse3.lb-2903yb.echo.apps.hcpapps.net.\n172.31.201.1\n
    "},{"location":"kuadrant-operator/doc/overviews/dns/#simple","title":"simple","text":"
    apiVersion: kuadrant.io/v1alpha1\nkind: DNSRecord\nmetadata:\n  name: echo.apps.hcpapps.net\n  namespace: <Gateway Namespace>\nspec:\n  endpoints:\n\n    - dnsName: echo.apps.hcpapps.net\n      recordTTL: 60\n      recordType: A\n      targets:\n        - 172.31.201.1\n        - 172.31.202.1\n  providerRefs:\n    - name: my-aws-credentials\n

    After DNSRecord reconciliation the listener hostname should be resolvable through dns:

    dig echo.apps.hcpapps.net +short\n172.31.201.1\n
    "},{"location":"kuadrant-operator/doc/overviews/dns/#known-limitations","title":"Known limitations","text":"
    • One Gateway can only be targeted by one DNSPolicy unless subsequent DNSPolicies choose to specify a sectionName in their targetRef.
    • DNSPolicies can only target Gateways defined within the same namespace of the DNSPolicy.
    "},{"location":"kuadrant-operator/doc/overviews/dns/#troubleshooting","title":"Troubleshooting","text":""},{"location":"kuadrant-operator/doc/overviews/dns/#understanding-status","title":"Understanding status","text":"

    The Status.Conditions on DNSPolicy mostly serves as an aggregation of the DNSRecords conditions. The DNSPolicy conditions:

    • Accepted indicates that policy was validated and is accepted by the controller for the reconciliation.
    • Enforced indicates that the controller acted upon the policy. If DNSRecords were created as the result this condition will reflect the Ready condition on the record. This condition is removed if Accepted is false. If partially enforced, the condition will be set to True
    • SubResourcesHealthy reflects Healthy conditions of sub-resources. This condition is removed if Accepted is false. If partially healthy, the condition will be set to False

    The Status.Conditions on the DNSRecord are as follows:

    • Ready indicates that the record was successfully published to the provider.
    • Healthy indicates that dnshealthcheckprobes are healthy. If not all probes are healthy, the condition will be set to False
    "},{"location":"kuadrant-operator/doc/overviews/dns/#logs","title":"Logs","text":"

    To increase the log level of the kuadrant-operator refer to this logging doc.

    To increase the log level of the dns-operator-controller-manager and for the examples on log queries refer to the logging section in the DNS Operator readme

    "},{"location":"kuadrant-operator/doc/overviews/dns/#debugging","title":"Debugging","text":"

    This section provides the typical sequence of actions to take during troubleshooting. It is meant as a reference for identifying the problem rather than a standard operating procedure.

    "},{"location":"kuadrant-operator/doc/overviews/dns/#list-policies-to-identify-the-failing-one","title":"List policies to identify the failing one","text":"
    kubectl get dnspolicy -A -o wide\n
    "},{"location":"kuadrant-operator/doc/overviews/dns/#inspect-the-failing-policy","title":"Inspect the failing policy","text":"

    kubectl get dnspolicy <dnspolicy-name> -n <dnspolicy-namespace> -o yaml | yq '.status.conditions'\n
    The output will show which DNSRecords are failing and for what reasons. For example:
    - lastTransitionTime: \"2024-12-04T09:46:22Z\"\n  message: DNSPolicy has been accepted\n  reason: Accepted\n  status: \"True\"\n  type: Accepted\n- lastTransitionTime: \"2024-12-04T09:46:29Z\"\n  message: 'DNSPolicy has been partially enforced. Not ready DNSRecords are: test-api '\n  reason: Enforced\n  status: \"True\"\n  type: Enforced\n- lastTransitionTime: \"2024-12-04T09:46:27Z\"\n  message: 'DNSPolicy has encountered some issues: not all sub-resources of policy are passing the policy defined health check. Not healthy DNSRecords are: test-api '\n  reason: Unknown\n  status: \"False\"\n  type: SubResourcesHealthy\n
    This example indicates that the policy was accepted and one of the DNSRecords - test-api DNSRecord - is not ready and not healthy

    "},{"location":"kuadrant-operator/doc/overviews/dns/#locate-sub-records-to-confirm-conditions","title":"Locate sub-records to confirm conditions","text":"

    This ensures that the Kuadrant operator propagated the status correctly. The names of the DNSRecords are composed of the Gateway name followed by a listener name and are created in the DNSPolicy namespace.

    kubectl get dnsrecord -n <dnspolicy-namespace> \n

    "},{"location":"kuadrant-operator/doc/overviews/dns/#inspect-the-record-to-get-more-detailed-information-on-the-failure","title":"Inspect the record to get more detailed information on the failure","text":"

    kubectl get dnsrecord <dnsrecord-name> -n <dnspolicy-namespace> -o yaml | yq '.status'\n
    Most of the time the conditions will hold all necessary information. However, it is advised to pay attention to the queuedAt and validFor fields to understand when the record was processed and when the controller expects it to be reconciled again.

    "},{"location":"kuadrant-operator/doc/overviews/dns/#inspect-health-check-probes","title":"Inspect health check probes","text":"

    We create a probe per address per DNS record. The name of the probe is the DNSRecord name followed by an address.

    # list probes \nkubectl get dnshealthcheckprobe -n <dnspolicy-namespace>\n# inspect the probe \nkubectl get dnshealthcheckprobe <probe-name> -n <dnspolicy-namespace> -o yaml | yq '.status'\n

    "},{"location":"kuadrant-operator/doc/overviews/dns/#identify-what-in-logs-to-look-for","title":"Identify what in logs to look for","text":"

    There are two operators to look into and a number of controllers. The commands above should provide an understanding of what component/process is failing. Use the following to identify the correct controller:

    • If the problem is in the status propagation from the DNSRecord to the DNSPolicy or in the creation of the DNSRecord: kuadrant-operator logs under kuadrant-operator.EffectiveDNSPoliciesReconciler reconciler
    • If the problem is in publishing the DNSRecord or reacting to the healthcheckprobe CR: dns-operator-controller-manager logs under dnsrecord_controller reconciler
    • If the problem is in the creation of the probes: dns-operator-controller-manager logs under dnsrecord_controller.healthchecks reconciler
    • If the problem is in the execution of the healthchecks: dns-operator-controller-manager logs under dnsprobe_controller reconciler
    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/","title":"Kuadrant Rate Limiting","text":"

    A Kuadrant RateLimitPolicy custom resource, often abbreviated \"RateLimitPolicy\":

    1. Targets Gateway API networking resources such as HTTPRoutes and Gateways, using these resources to obtain additional context, i.e., which traffic workload (HTTP attributes, hostnames, user attributes, etc) to rate limit.
    2. Supports targeting subsets (sections) of a network resource to apply the limits to.
    3. Abstracts the details of the underlying Rate Limit protocol and configuration resources, that have a much broader remit and surface area.
    4. Enables cluster operators to set defaults that govern behavior at the lower levels of the network, until a more specific policy is applied.
    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#envoys-rate-limit-service-protocol","title":"Envoy's Rate Limit Service Protocol","text":"

    Kuadrant's Rate Limit implementation relies on the Envoy's Rate Limit Service (RLS) protocol. The workflow per request goes:

    1. On incoming request, the gateway checks the matching rules for enforcing rate limits, as stated in the RateLimitPolicy custom resources and targeted Gateway API networking objects
    2. If the request matches, the gateway sends one RateLimitRequest to the external rate limiting service (\"Limitador\").
    3. The external rate limiting service responds with a RateLimitResponse back to the gateway with either an OK or OVER_LIMIT response code.

    A RateLimitPolicy and its targeted Gateway API networking resource contain all the statements to configure both the ingress gateway and the external rate limiting service.

    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#the-ratelimitpolicy-custom-resource","title":"The RateLimitPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#overview","title":"Overview","text":"

    The RateLimitPolicy spec includes, basically, two parts:

    • A reference to an existing Gateway API resource (spec.targetRef)
    • Limit definitions (spec.limits)

    Each limit definition includes:

    • A set of rate limits (spec.limits.<limit-name>.rates[])
    • (Optional) A set of dynamic counter qualifiers (spec.limits.<limit-name>.counters[])
    • (Optional) A set of additional dynamic conditions to activate the limit (spec.limits.<limit-name>.when[])

    The limit definitions (limits) can be declared at the top-level level of the spec (with the semantics of defaults) or alternatively within explicit defaults or overrides blocks.

    Check out Kuadrant RFC 0002 to learn more about the Well-known Attributes that can be used to define counter qualifiers (counters) and conditions (when).

    Check out the API reference for a full specification of the RateLimitPolicy CRD.

    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#using-the-ratelimitpolicy","title":"Using the RateLimitPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#targeting-a-httproute-networking-resource","title":"Targeting a HTTPRoute networking resource","text":"

    When a RateLimitPolicy targets a HTTPRoute, the policy is enforced to all traffic routed according to the rules and hostnames specified in the HTTPRoute, across all Gateways referenced in the spec.parentRefs field of the HTTPRoute.

    Target a HTTPRoute by setting the spec.targetRef field of the RateLimitPolicy as follows:

    apiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: <RateLimitPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: <HTTPRoute Name>\n  limits: { \u2026 }\n

    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#hostnames-and-wildcards","title":"Hostnames and wildcards","text":"

    If a RateLimitPolicy targets a route defined for *.com and another RateLimitPolicy targets another route for api.com, the Kuadrant control plane will not merge these two RateLimitPolicies. Unless one of the policies declares an overrides set of limits, the control plane will configure the gateway to mimic the behavior of the gateway implementation by which the \"most specific hostname wins\", thus enforcing only the corresponding applicable policies and limit definitions.

    E.g., by default, a request coming for api.com will be rate limited according to the rules from the RateLimitPolicy that targets the route for api.com; while a request for other.com will be rate limited with the rules from the RateLimitPolicy targeting the route for *.com.

    See more examples in Overlapping Gateway and HTTPRoute RateLimitPolicies.

    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

    A RateLimitPolicy that targets a Gateway can declare a block of defaults (spec.defaults) or a block of overrides (spec.overrides). As a standard, gateway policies that specify neither defaults nor overrides act as defaults.

    When declaring defaults, a RateLimitPolicy which targets a Gateway will be enforced on all HTTP traffic hitting the gateway, unless a more specific RateLimitPolicy targeting a matching HTTPRoute exists. Any new HTTPRoute referencing the gateway as parent will be automatically covered by the default RateLimitPolicy, as well as changes in the existing HTTPRoutes.

    Defaults provide cluster operators with the ability to protect the infrastructure against unplanned and malicious network traffic attempts, such as by setting safe default limits on hostnames and hostname wildcards.

    Conversely, a gateway policy that specifies overrides declares a set of rules to be enforced on all routes attached to the gateway, thus atomically replacing any more specific policy occasionally attached to any of those routes.

    Target a Gateway by setting the spec.targetRef field of the RateLimitPolicy as follows:

    apiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: <RateLimitPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n  defaults: # alternatively: `overrides`\n    limits: { \u2026 }\n

    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#overlapping-gateway-and-httproute-ratelimitpolicies","title":"Overlapping Gateway and HTTPRoute RateLimitPolicies","text":"

    Two possible semantics are to be considered here \u2013 gateway policy defaults vs gateway policy overrides.

    Gateway RateLimitPolicies that declare defaults (or alternatively neither defaults nor overrides) protect all traffic routed through the gateway except where a more specific HTTPRoute RateLimitPolicy exists, in which case the HTTPRoute RateLimitPolicy prevails.

    Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway default (plus 2 HTTPRoute and 2 Gateways without RateLimitPolicies attached):

    • RateLimitPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
    • RateLimitPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
    • RateLimitPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
    • RateLimitPolicy G (defaults) \u2192 Gateway G (*.com)

    Expected behavior:

    • Request to a.toystore.com \u2192 RateLimitPolicy A will be enforced
    • Request to b.toystore.com \u2192 RateLimitPolicy B will be enforced
    • Request to other.toystore.com \u2192 RateLimitPolicy W will be enforced
    • Request to other.com (suppose a route exists) \u2192 RateLimitPolicy G will be enforced
    • Request to yet-another.net (suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced

    Gateway RateLimitPolicies that declare overrides protect all traffic routed through the gateway, regardless of existence of any more specific HTTPRoute RateLimitPolicy.

    Example with 4 RateLimitPolicies, 3 HTTPRoutes and 1 Gateway override (plus 2 HTTPRoute and 2 Gateways without RateLimitPolicies attached):

    • RateLimitPolicy A \u2192 HTTPRoute A (a.toystore.com) \u2192 Gateway G (*.com)
    • RateLimitPolicy B \u2192 HTTPRoute B (b.toystore.com) \u2192 Gateway G (*.com)
    • RateLimitPolicy W \u2192 HTTPRoute W (*.toystore.com) \u2192 Gateway G (*.com)
    • RateLimitPolicy G (overrides) \u2192 Gateway G (*.com)

    Expected behavior:

    • Request to a.toystore.com \u2192 RateLimitPolicy G will be enforced
    • Request to b.toystore.com \u2192 RateLimitPolicy G will be enforced
    • Request to other.toystore.com \u2192 RateLimitPolicy G will be enforced
    • Request to other.com (suppose a route exists) \u2192 RateLimitPolicy G will be enforced
    • Request to yet-another.net (suppose a route and gateway exist) \u2192 No RateLimitPolicy will be enforced
    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#limit-definition","title":"Limit definition","text":"

    A limit will be activated whenever a request comes in and the request matches:

    • all of the when conditions specified in the limit.

    A limit can define:

    • counters that are qualified based on dynamic values fetched from the request, or
    • global counters (implicitly, when no qualified counter is specified)

    A limit is composed of one or more rate limits.

    E.g.

    spec:\n  limits:\n    \"toystore-all\":\n      rates:\n\n        - limit: 5000\n          window: 1s\n\n    \"toystore-api-per-username\":\n      rates:\n\n        - limit: 100\n          window: 1s\n        - limit: 1000\n          window: 1m\n      counters:\n        - expression: auth.identity.username\n      when:\n        - predicate: request.host == 'api.toystore.com'\n\n    \"toystore-admin-unverified-users\":\n      rates:\n\n        - limit: 250\n          window: 1s\n      when:\n        - predicate: request.host == 'admin.toystore.com'\n        - predicate: !auth.identity.email_verified\n
    Request to Rate limits enforced api.toystore.com 100rps/username or 1000rpm/username (whatever happens first) admin.toystore.com 250rps other.toystore.com 5000rps"},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#when-conditions","title":"when conditions","text":"

    when conditions can be used to scope a limit (i.e. to filter the traffic to which a limit definition applies) without any coupling to the underlying network topology, i.e. without making direct references to HTTPRouteRules.

    Use when conditions to conditionally activate limits based on attributes that cannot be expressed in the HTTPRoutes' spec.hostnames and spec.rules.matches fields, or in general in RateLimitPolicies that target a Gateway.

    The selectors within the when conditions of a RateLimitPolicy are a subset of Kuadrant's Well-known Attributes (RFC 0002). Check out the reference for the full list of supported selectors.

    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#examples","title":"Examples","text":"

    Check out the following user guides for examples of rate limiting services with Kuadrant:

    • Simple Rate Limiting for Applications
    • Authenticated Rate Limiting for Application
    • Gateway Rate Limiting for Cluster Operators
    • Authenticated Rate Limiting with JWTs and Kubernetes RBAC
    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#known-limitations","title":"Known limitations","text":"
    • RateLimitPolicies can only target HTTPRoutes/Gateways defined within the same namespace of the RateLimitPolicy.
    • 2+ RateLimitPolicies cannot target network resources that define/inherit the same exact hostname.
    "},{"location":"kuadrant-operator/doc/overviews/rate-limiting/#implementation-details","title":"Implementation details","text":"

    Driven by limitations related to how Istio injects configuration in the filter chains of the ingress gateways, Kuadrant relies on Envoy's Wasm Network filter in the data plane to manage the integration with the rate limiting service (\"Limitador\"), instead of the Rate Limit filter.

    Motivation: Multiple rate limit domains

    The first limitation comes from having only one filter chain per listener. This often leads to one single global rate limiting filter configuration per gateway, and therefore to a shared rate limit domain across applications and policies. Even though, in a rate limit filter, the triggering of rate limit calls, via actions to build so-called \"descriptors\", can be defined at the level of the virtual host and/or specific route rule, the overall rate limit configuration is only one, i.e., always the same rate limit domain for all calls to Limitador.

    On the other hand, the possibility to configure and invoke the rate limit service for multiple domains depending on the context allows to isolate groups of policy rules, as well as to optimize performance in the rate limit service, which can rely on the domain for indexation.

    Motivation: Fine-grained matching rules A second limitation of configuring the rate limit filter via Istio, particularly from Gateway API resources, is that rate limit descriptors at the level of a specific HTTP route rule require \"named routes\" \u2013 defined only in an Istio VirtualService resource and referred in an EnvoyFilter one. Because Gateway API HTTPRoute rules lack a \"name\" property1, as well as the Istio VirtualService resources are only ephemeral data structures handled by Istio in-memory in its implementation of gateway configuration for Gateway API, where the names of individual route rules are auto-generated and not referable by users in a policy23, rate limiting by attributes of the HTTP request (e.g., path, method, headers, etc) would be very limited while depending only on Envoy's Rate Limit filter.

    Motivated by the desire to support multiple rate limit domains per ingress gateway, as well as fine-grained HTTP route matching rules for rate limiting, Kuadrant implements a wasm-shim that handles the rules to invoke the rate limiting service, complying with Envoy's Rate Limit Service (RLS) protocol.

    The wasm module integrates with the gateway in the data plane via Wasm Network filter, and parses a configuration composed out of user-defined RateLimitPolicy resources by the Kuadrant control plane. Whereas the rate limiting service (\"Limitador\") remains an implementation of Envoy's RLS protocol, capable of being integrated directly via Rate Limit extension or by Kuadrant, via wasm module for the Istio Gateway API implementation.

    As a consequence of this design:

    • Users can define fine-grained rate limit rules that match their Gateway and HTTPRoute definitions including for subsections of these.
    • Rate limit definitions are insulated, not leaking across unrelated policies or applications.
    • Conditions to activate limits are evaluated in the context of the gateway process, reducing the gRPC calls to the external rate limiting service only to the cases where rate limit counters are known in advance to have to be checked/incremented.
    • The rate limiting service can rely on the indexation to look up for groups of limit definitions and counters.
    • Components remain compliant with industry protocols and flexible for different integration options.

    A Kuadrant wasm-shim configuration for one RateLimitPolicy custom resource targeting a HTTPRoute looks like the following and is generated automatically by the Kuadrant control plane:

    apiVersion: extensions.istio.io/v1alpha1\nkind: WasmPlugin\nmetadata:\n  creationTimestamp: \"2024-10-01T16:59:40Z\"\n  generation: 1\n  name: kuadrant-kuadrant-ingressgateway\n  namespace: gateway-system\n  ownerReferences:\n\n    - apiVersion: gateway.networking.k8s.io/v1\n      blockOwnerDeletion: true\n      controller: true\n      kind: Gateway\n      name: kuadrant-ingressgateway\n      uid: 0298355b-fb30-4442-af2b-88d0c05bd2bd\n  resourceVersion: \"11253\"\n  uid: 36ef1fb7-9eca-46c7-af63-fe783f40148c\nspec:\n  phase: STATS\n  pluginConfig:\n    services:\n      ratelimit-service:\n        type: ratelimit\n        endpoint: ratelimit-cluster\n        failureMode: allow\n    actionSets:\n      - name: some_name_0\n        routeRuleConditions:\n          hostnames:\n            - \"*.toystore.website\"\n            - \"*.toystore.io\"\n          predicates:\n            - request.url_path.startsWith(\"/assets\")\n        actions:\n          - service: ratelimit-service\n            scope: gateway-system/app-rlp\n            predicates:\n              - request.host.endsWith('.toystore.website')\n            data:\n              - expression:\n                  key: limit.toystore_assets_all_domains__b61ee8e6\n                  value: \"1\"\n      - name: some_name_1\n        routeRuleConditions:\n          hostnames:\n            - \"*.toystore.website\"\n            - \"*.toystore.io\"\n          predicates:\n            - request.url_path.startsWith(\"/v1\")\n        actions:\n          - service: ratelimit-service\n            scope: gateway-system/app-rlp\n            predicates:\n              - request.host.endsWith('.toystore.website')\n              - auth.identity.username == \"\"\n            data:\n              - expression:\n                  key: limit.toystore_v1_website_unauthenticated__377837ee\n                  value: \"1\"\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: kuadrant-ingressgateway\n  url: 
oci://quay.io/kuadrant/wasm-shim:latest\n
    1. https://github.com/kubernetes-sigs/gateway-api/pull/996\u00a0\u21a9

    2. https://github.com/istio/istio/issues/36790\u00a0\u21a9

    3. https://github.com/istio/istio/issues/37346\u00a0\u21a9

    "},{"location":"kuadrant-operator/doc/overviews/tls/","title":"TLS","text":"

    A Kuadrant TLSPolicy custom resource:

    Targets Gateway API Gateway networking resources to provide TLS for gateway listeners by managing the lifecycle of TLS certificates using CertManager.

    "},{"location":"kuadrant-operator/doc/overviews/tls/#how-it-works","title":"How it works","text":""},{"location":"kuadrant-operator/doc/overviews/tls/#the-tlspolicy-custom-resource","title":"The TLSPolicy custom resource","text":""},{"location":"kuadrant-operator/doc/overviews/tls/#overview","title":"Overview","text":"

    The TLSPolicy spec includes the following parts:

    • A reference to an existing Gateway API resource (spec.targetRef)
    "},{"location":"kuadrant-operator/doc/overviews/tls/#high-level-example-and-field-definition","title":"High-level example and field definition","text":"
    apiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: my-tls-policy\nspec:\n  # reference to an existing networking resource to attach the policy to\n  # it can only be a Gateway API Gateway resource\n  # it can only refer to objects in the same namespace as the TLSPolicy\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: mygateway\n

    Check out the API reference for a full specification of the TLSPolicy CRD.

    "},{"location":"kuadrant-operator/doc/overviews/tls/#using-the-tlspolicy","title":"Using the TLSPolicy","text":""},{"location":"kuadrant-operator/doc/overviews/tls/#targeting-a-gateway-networking-resource","title":"Targeting a Gateway networking resource","text":"

    When a TLSPolicy targets a Gateway, the policy will be enforced on all gateway listeners that have a valid TLS section.

    Target a Gateway by setting the spec.targetRef field of the TLSPolicy as follows:

    apiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: <TLSPolicy name>\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: <Gateway Name>\n
    "},{"location":"kuadrant-operator/doc/overviews/tls/#examples","title":"Examples","text":"

    Check out the following user guides for examples of using the Kuadrant TLSPolicy:

    "},{"location":"kuadrant-operator/doc/overviews/tls/#known-limitations","title":"Known limitations","text":""},{"location":"kuadrant-operator/doc/reference/authpolicy/","title":"The AuthPolicy Custom Resource Definition (CRD)","text":"
    • AuthPolicy
    • AuthPolicySpec
    • AuthScheme
      • AuthRuleCommon
      • AuthenticationRule
      • MetadataRule
      • AuthorizationRule
      • ResponseSpec
      • SuccessResponseSpec
        • SuccessResponseItem
      • CallbackRule
    • NamedPattern
    • AuthPolicyCommonSpec
    • AuthPolicyStatus
    • ConditionSpec
    "},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicy","title":"AuthPolicy","text":"Field Type Required Description spec AuthPolicySpec Yes The specification for AuthPolicy custom resource status AuthPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicyspec","title":"AuthPolicySpec","text":"Field Type Required Description targetRef LocalPolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to rules AuthScheme No Implicit default authentication/authorization rules patterns MapNamedPattern> No Implicit default named patterns of lists of selector, operator and value tuples, to be reused in when conditions and pattern-matching authorization rules. when []PatternExpressionOrRef No List of implicit default additional dynamic conditions (expressions) to activate the policy. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames and spec.rules.matches fields, or when targeting a Gateway. defaults AuthPolicyCommonSpec No Explicit default definitions. This field is mutually exclusive with any of the implicit default definitions: spec.rules, spec.patterns, spec.when overrides AuthPolicyCommonSpec No Atomic overrides definitions. This field is mutually exclusive with any of the implicit or explicit default definitions: spec.rules, spec.patterns, spec.when, spec.default"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicycommonspec","title":"AuthPolicyCommonSpec","text":"Field Type Required Description rules AuthScheme No Authentication/authorization rules patterns MapNamedPattern> No Named patterns of lists of selector, operator and value tuples, to be reused in when conditions and pattern-matching authorization rules. when []PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the policy. 
Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames and spec.rules.matches fields, or when targeting a Gateway."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authscheme","title":"AuthScheme","text":"Field Type Required Description authentication MapAuthenticationRule> No Authentication rules. At least one config MUST evaluate to a valid identity object for the auth request to be successful. If omitted or empty, anonymous access is assumed. metadata MapMetadataRule> No Rules for fetching auth metadata from external sources. authorization MapAuthorizationRule> No Authorization rules. All policies MUST allow access for the auth request be successful. response ResponseSpec No Customizations to the response to the authorization request. Use it to set custom values for unauthenticated, unauthorized, and/or success access request. callbacks MapCallbackRule> No Rules for post-authorization callback requests to external services. Triggered regardless of the result of the authorization request."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authrulecommon","title":"AuthRuleCommon","text":"Field Type Required Description when []PatternExpressionOrRef No List of additional dynamic conditions (expressions) to activate the auth rule. Use it for filtering attributes that cannot be expressed in the targeted HTTPRoute's spec.hostnames and spec.rules.matches fields, or when targeting a Gateway. cache Caching spec No Caching options for the resolved object returned when applying this auth rule. (Default: disabled) priority Integer No Priority group of the auth rule. All rules in the same priority group are evaluated concurrently; consecutive priority groups are evaluated sequentially. (Default: 0) metrics Boolean No Whether the auth rule emits individual observability metrics. 
(Default: false)"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authenticationrule","title":"AuthenticationRule","text":"Field Type Required Description apiKey API Key authentication spec No Authentication based on API keys stored in Kubernetes secrets. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. kubernetesTokenReview KubernetesTokenReview spec No Authentication by Kubernetes token review. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. jwt JWT verification spec No Authentication based on JSON Web Tokens (JWT). Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. oauth2Introspection OAuth2 Token Introscpection spec No Authentication by OAuth2 token introspection. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. x509 X.509 authentication spec No Authentication based on client X.509 certificates. The certificates presented by the clients must be signed by a trusted CA whose certificates are stored in Kubernetes secrets. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. plain Plain identity object spec No Identity object extracted from the context. Use this method when authentication is performed beforehand by a proxy and the resulting object passed to Authorino as JSON in the auth request. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. anonymous Anonymous access No Anonymous access. Use one of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous. credentials Auth credentials spec No Customizations to where credentials are required to be passed in the request for authentication based on this auth rule. Defaults to HTTP Authorization header with prefix \"Bearer\". overrides Identity extension spec No JSON overrides to set to the resolved identity object. 
Do not use it with identity objects of other JSON types (array, string, etc). defaults Identity extension spec No JSON defaults to set to the resolved identity object. Do not use it with identity objects of other JSON types (array, string, etc). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#metadatarule","title":"MetadataRule","text":"Field Type Required Description http HTTP GET/GET-by-POST external metadata spec No External source of auth metadata via HTTP request. Use one of: http, userInfo, uma. userInfo OIDC UserInfo spec No OpenID Connect UserInfo linked to an OIDC authentication rule declared in this same AuthPolicy. Use one of: http, userInfo, uma. uma UMA metadata spec No User-Managed Access (UMA) source of resource data. Use one of: http, userInfo, uma. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#authorizationrule","title":"AuthorizationRule","text":"Field Type Required Description patternMatching Pattern-matching authorization spec No Pattern-matching authorization rules. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. opa OPA authorization spec No Open Policy Agent (OPA) Rego policy. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. kubernetesSubjectAccessReview Kubernetes SubjectAccessReview spec No Authorization by Kubernetes SubjectAccessReview. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. spicedb SpiceDB authorization spec No Authorization decision delegated to external Authzed/SpiceDB server. Use one of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb. (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#responsespec","title":"ResponseSpec","text":"Field Type Required Description unauthenticated Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthenticated. 
(Default: 401 Unauthorized) unauthorized Custom denial status spec No Customizations on the denial status and other HTTP attributes when the request is unauthorized. (Default: 403 Forbidden) success SuccessResponseSpec No Response items to be included in the auth response when the request is authenticated and authorized."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponsespec","title":"SuccessResponseSpec","text":"Field Type Required Description headers Map<string:SuccessResponseItem> No Custom success response items wrapped as HTTP headers to be injected in the request. filters Map<string:SuccessResponseItem> No Custom success response items made available to other filters managed by Kuadrant (i.e. Rate Limit)."},{"location":"kuadrant-operator/doc/reference/authpolicy/#successresponseitem","title":"SuccessResponseItem","text":"Field Type Required Description plain Plain text response item No Plain text content. Use one of: plain, json, wristband. json JSON injection response item No Specification of a JSON object. Use one of: plain, json, wristband. wristband Festival Wristband token response item No Specification of a JSON object. Use one of: plain, json, wristband. key String No The key used to add the custom response item (name of the HTTP header or root property of the Dynamic Metadata object). Defaults to the name of the response item if omitted."},{"location":"kuadrant-operator/doc/reference/authpolicy/#callbackrule","title":"CallbackRule","text":"Field Type Required Description http HTTP endpoints callback spec No HTTP endpoint settings to build the callback request (webhook). (inline) AuthRuleCommon No"},{"location":"kuadrant-operator/doc/reference/authpolicy/#namedpattern","title":"NamedPattern","text":"Field Type Required Description selector String Yes A valid Well-known attribute whose resolved value in the data plane will be compared to value, using the operator. 
operator String Yes The binary operator to be applied to the resolved value specified by the selector. One of: eq (equal to), neq (not equal to), incl (includes; for arrays), excl (excludes; for arrays), matches (regex). value String Yes The static value to be compared to the one resolved from the selector."},{"location":"kuadrant-operator/doc/reference/authpolicy/#authpolicystatus","title":"AuthPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/authpolicy/#conditionspec","title":"ConditionSpec","text":"
    • The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
    • The message field is a human-readable message indicating details about the transition.
    • The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
    • The status field is a string, with possible values True, False, and Unknown.
    • The type field is a string with the following possible values:
    • Available: the resource has been successfully configured;
    Field Type Description type String Condition Type status String Status: True, False, Unknown reason String Condition state reason message String Condition state description lastTransitionTime Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/authpolicy/#high-level-example","title":"High-level example","text":"
    apiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: my-auth-policy\nspec:\n  # Reference to an existing networking resource to attach the policy to. REQUIRED.\n  # It can be a Gateway API HTTPRoute or Gateway resource.\n  # It can only refer to objects in the same namespace as the AuthPolicy.\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute / Gateway\n    name: myroute / mygateway\n\n  # Additional dynamic conditions to trigger the AuthPolicy.\n  # Use it for filtering attributes not supported by HTTPRouteRule or with AuthPolicies that target a Gateway.\n  # Check out https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md to learn more\n  # about the Well-known Attributes that can be used in this field.\n  # Equivalent to if otherwise declared within `defaults`.\n  when: [\u2026]\n\n  # Sets of common patterns of selector-operator-value triples, to be referred by name in `when` conditions\n  # and pattern-matching rules. 
Often employed to avoid repetition in the policy.\n  # Equivalent to if otherwise declared within `defaults`.\n  patterns: { \u2026 }\n\n  # The auth rules to apply to the network traffic routed through the targeted resource.\n  # Equivalent to if otherwise declared within `defaults`.\n  rules:\n    # Authentication rules to enforce.\n    # At least one config must evaluate to a valid identity object for the auth request to be successful.\n    # If omitted or empty, anonymous access is assumed.\n    authentication:\n      \"my-authn-rule\":\n        # The authentication method of this rule.\n        # One-of: apiKey, jwt, oauth2Introspection, kubernetesTokenReview, x509, plain, anonymous.\n        apiKey: { \u2026 }\n\n        # Where credentials are required to be passed in the request for authentication based on this rule.\n        # One-of: authorizationHeader, customHeader, queryString, cookie.\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n\n        # Rule-level additional conditions.\n        when: [\u2026]\n\n        # Configs for caching the resolved object returned out of evaluating this auth rule.\n        cache: { \u2026 }\n\n    # Rules for fetching auth metadata from external sources.\n    metadata:\n      \"my-external-source\":\n        # The method for fetching metadata from the external source.\n        # One-of: http: userInfo, uma.\n        http: { \u2026 }\n\n    # Authorization rules to enforce.\n    # All policies must allow access for the auth request be successful.\n    authorization:\n      \"my-authz-rule\":\n        # The authorization method of this rule.\n        # One-of: patternMatching, opa, kubernetesSubjectAccessReview, spicedb.\n        opa: { \u2026 }\n\n    # Customizations to the authorization response.\n    response:\n      # Custom denial status and other HTTP attributes for unauthenticated requests.\n      unauthenticated: { \u2026 }\n\n      # Custom denial status and other HTTP 
attributes for unauhtorized requests.\n      unauthorized: { \u2026 }\n\n      # Custom response items when access is granted.\n      success:\n        # Custom response items wrapped as HTTP headers to be injected in the request\n        headers:\n          \"my-custom-header\":\n            # One-of: plain, json, wristband.\n            plain: { \u2026 }\n\n        # Custom response items wrapped as envoy dynamic metadata.\n        dynamicMetadata:\n          # One-of: plain, json, wristband.\n          \"my-custom-dyn-metadata\":\n            json: { \u2026 }\n\n    # Rules for post-authorization callback requests to external services.\n    # Triggered regardless of the result of the authorization request.\n    callbacks:\n      \"my-webhook\":\n        http: { \u2026 }\n\n    # Explicit defaults. Used in policies that target a Gateway object to express default rules to be enforced on\n    # routes that lack a more specific policy attached to.\n    # Mutually exclusive with `overrides` and with declaring the `rules`, `when` and `patterns` at the top-level of\n    # the spec.\n    defaults:\n      rules:\n        authentication: { \u2026 }\n        metadata: { \u2026 }\n        authorization: { \u2026 }\n        response: { \u2026 }\n        callbacks: { \u2026 }\n      when: [\u2026]\n      patterns: { \u2026 }\n\n    # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n    # thus also overriding any more specific policy occasionally attached to any of those routes.\n    # Mutually exclusive with `defaults` and with declaring `rules`, `when` and `patterns` at the top-level of\n    # the spec.\n    overrides:\n      rules:\n        authentication: { \u2026 }\n        metadata: { \u2026 }\n        authorization: { \u2026 }\n        response: { \u2026 }\n        callbacks: { \u2026 }\n      when: [\u2026]\n      patterns: { \u2026 }\n
    "},{"location":"kuadrant-operator/doc/reference/dnspolicy/","title":"The DNSPolicy Custom Resource Definition (CRD)","text":"
    • DNSPolicy
    • DNSPolicySpec
    • excludeAddresses
    • ProviderRefs
    • HealthCheckSpec
    • LoadBalancingSpec
      • LoadBalancingWeighted
      • CustomWeight
      • LoadBalancingGeo
    • DNSPolicyStatus
    • HealthCheckStatus
    "},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicy","title":"DNSPolicy","text":"Field Type Required Description spec DNSPolicySpec Yes The specification for DNSPolicy custom resource status DNSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicyspec","title":"DNSPolicySpec","text":"Field Type Required Description targetRef Gateway API LocalPolicyTargetReferenceWithSectionName Yes Reference to a Kubernetes resource that the policy attaches to healthCheck HealthCheckSpec No HealthCheck spec loadBalancing LoadBalancingSpec No LoadBalancing Spec providerRefs ProviderRefs Yes array of references to providers. (currently limited to max 1)"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#providerrefs","title":"ProviderRefs","text":"Field Type Required Description providerRefs []ProviderRef Yes max 1 reference. This is an array of providerRef that points to a local secret(s) that contains the required provider auth values"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#providerref","title":"ProviderRef","text":"Field Type Required Description name String Yes Name of the secret in the same namespace that contains the provider credentials"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#excludeaddresses","title":"ExcludeAddresses","text":"Field Type Required Description excludeAddresses []String No set of hostname, CIDR or IP Addresses to exclude from the DNS Provider"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckspec","title":"HealthCheckSpec","text":"Field Type Required Description name String Yes Name of the secret in the same namespace that contains the provider credentials -------------------- ------------ :------------: ----------------------------------------------------------------------------------------------------------- path String Yes Path is the path to append to the host to reach the expected health check. 
Must start with \"?\" or \"/\", contain only valid URL characters and end with alphanumeric char or \"/\". For example \"/\" or \"/healthz\" are common port Number Yes Port to connect to the host on. Must be either 80, 443 or 1024-49151 protocol String Yes Protocol to use when connecting to the host, valid values are \"HTTP\" or \"HTTPS\" failureThreshold Number Yes FailureThreshold is a limit of consecutive failures that must occur for a host to be considered unhealthy interval Duration Yes Interval defines how frequently this probe should execute additionalHeadersRef String No AdditionalHeadersRef refers to a secret that contains extra headers to send in the probe request, this is primarily useful if an authentication token is required by the endpoint. allowInsecureCertificate Boolean No AllowInsecureCertificate will instruct the health check probe to not fail on a self-signed or otherwise invalid SSL certificate this is primarily used in development or testing environments"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#loadbalancingspec","title":"LoadBalancingSpec","text":"Field Type Required Description defaultGeo Boolean Yes Specifies if this is the default geo geo String Yes Geo value to apply to geo endpoints weight Number No Weight value to apply to weighted endpoints default: 120"},{"location":"kuadrant-operator/doc/reference/dnspolicy/#dnspolicystatus","title":"DNSPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the resource. healthCheck HealthCheckStatus HealthCheck status. 
recordConditions [String][]Kubernetes meta/v1.Condition Status of individual DNSRecords owned by this policy."},{"location":"kuadrant-operator/doc/reference/dnspolicy/#healthcheckstatus","title":"HealthCheckStatus","text":"Field Type Description conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/dnspolicy/#high-level-example","title":"High-level example","text":"
    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: my-dns-policy\nspec:\n  # reference to an existing networking resource to attach the policy to\n  # it can only be a Gateway API Gateway resource\n  # it can only refer to objects in the same namespace as the DNSPolicy\n  # it can target a specific listener using sectionName\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: mygateway\n    sectionName: api # (optional) if not set policy applies to all listeners that do not have a policy attached directly\n\n  # reference to an existing secret resource containing provider credentials and configuration\n  # it can only refer to Secrets in the same namespace as the DNSPolicy that have the type kuadrant.io/(provider) e.g kuadrant.io/aws\n  providerRefs:\n\n    - name: my-aws-credentials\n\n  # (optional) loadbalancing specification\n  # use it for providing the specification of how dns will be configured in order to provide balancing of requests across multiple clusters. If not configured, a simple A or CNAME record will be created. If you have a policy with no loadbalancing defined and want to move to a loadbalanced configuration, you will need to delete and re-create the policy.\n  loadBalancing:\n    # is this the default geo to be applied to records. It is important that you set the default geo flag to true **Only** for the GEO value you wish to act as the catchall GEO, you should not set multiple GEO values as default for a given targeted listener. Example: policy 1 targets listener 1 with a geo of US and sets default to true. Policy 2 targets a listener on another cluster and set the geo to EU and default to false. It is fine for policies in the same default GEO to set the value to true. The main thing is to have only one unique GEO set as the default for any shared listener hostname.\n    defaultGeo: true\n    # weighted specification. 
This will apply the given weight to the records created based on the targeted gateway listeners. If you have multiple gateways that share a listener host, you can set different weight values to influence how much traffic will be brought to a given gateway.\n    weight: 100\n    # This is the actual GEO location to set for records created by this policy. This can and should be different if you have multiple gateways across multiple geographic areas.\n\n    # AWS: To see all regions supported by AWS Route 53, please see the official (documentation)[https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-geo.html]. With Route 53 when setting a continent code use a \"GEO-\" prefix otherwise it will be considered a country code.\n\n    # GCP: To see all regions supported by GCP Cloud DNS, please see the official (documentation)[https://cloud.google.com/compute/docs/regions-zones]\n\n    #To see the different values you can use for the geo based DNS with Azure take a look at the following (documentation)[https://learn.microsoft.com/en-us/azure/traffic-manager/traffic-manager-geographic-regions]\n    geo: IE\n\n  # (optional) health check specification\n  # health check probes with the following specification will be created for each DNS target, these probes constantly check that the endpoint can be reached. They will flag an unhealthy endpoint in the status. If no DNSRecord has yet been published and the endpoint is unhealthy, the record will not be published until the health check passes.\n  healthCheck:\n    # the path on the listener host(s) that you want to check.\n    path: /health\n    # how many times does the health check need to fail before unhealthy.\n    failureThreshold: 3\n    # how often should it be checked.\n    interval: 5min\n    # additionalHeadersRef is reference to a local secret with a set of key value pairs to be used as headers when sending the health check request.\n    additionalHeadersRef:\n      name: headers\n
    "},{"location":"kuadrant-operator/doc/reference/kuadrant/","title":"The Kuadrant Custom Resource Definition (CRD)","text":""},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrant","title":"kuadrant","text":"Field Type Required Description spec KuadrantSpec No Blank specification status KuadrantStatus No The status for the custom resources."},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantspec","title":"KuadrantSpec","text":"

    Currently blank specification.

    "},{"location":"kuadrant-operator/doc/reference/kuadrant/#kuadrantstatus","title":"KuadrantStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/","title":"The RateLimitPolicy Custom Resource Definition (CRD)","text":""},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicy","title":"RateLimitPolicy","text":"Field Type Required Description spec RateLimitPolicySpec Yes The specification for RateLimitPolicy custom resource status RateLimitPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicyspec","title":"RateLimitPolicySpec","text":"Field Type Required Description targetRef LocalPolicyTargetReferenceWithSectionName Yes Reference to a Kubernetes resource that the policy attaches to. For more info defaults RateLimitPolicyCommonSpec No Default limit definitions. This field is mutually exclusive with the limits field overrides RateLimitPolicyCommonSpec No Overrides limit definitions. This field is mutually exclusive with the limits field and defaults field. This field is only allowed for policies targeting Gateway in targetRef.kind limits MapLimit> No Limit definitions. This field is mutually exclusive with the defaults field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#localpolicytargetreferencewithsectionname","title":"LocalPolicyTargetReferenceWithSectionName","text":"Field Type Required Description LocalPolicyTargetReference LocalPolicyTargetReference Yes Reference to a local policy target. 
sectionName SectionName No Section name for further specificity (if needed)."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#localpolicytargetreference","title":"LocalPolicyTargetReference","text":"Field Type Required Description group Group Yes Group of the target resource. kind Kind Yes Kind of the target resource. name ObjectName Yes Name of the target resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#sectionname","title":"SectionName","text":"Field Type Required Description SectionName v1.SectionName (String) Yes SectionName is the name of a section in a Kubernetes resource. In the following resources, SectionName is interpreted as the following: Gateway: Listener name HTTPRoute: HTTPRouteRule name* Service: Port name ### RateLimitPolicyCommonSpec Field Type Required Description when []Predicate No List of dynamic predicates to activate the policy. All expression must evaluate to true for the policy to be applied limits MapLimit> No Explicit Limit definitions. This field is mutually exclusive with RateLimitPolicySpec limits field"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#predicate","title":"Predicate","text":"Field Type Required Description predicate String Yes Defines one CEL expression that must be evaluated to bool"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#counter","title":"Counter","text":"Field Type Required Description expression String Yes Defines one CEL expression that will be used as rate limiting counter"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#limit","title":"Limit","text":"Field Type Required Description rates []RateLimit No List of rate limits associated with the limit definition counters []Counter No List of rate limit counter qualifiers. Items must be a valid Well-known attribute. Each distinct value resolved in the data plane starts a separate counter for each rate limit. 
when []Predicate No List of dynamic predicates to activate the limit. All expression must evaluate to true for the limit to be applied"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimit","title":"RateLimit","text":"Field Type Required Description limit Number Yes Maximum value allowed within the given period of time (duration) window String Yes The period of time that the limit applies. Follows Gateway API Duration format"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#ratelimitpolicystatus","title":"RateLimitPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []ConditionSpec List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#conditionspec","title":"ConditionSpec","text":"
    • The lastTransitionTime field provides a timestamp for when the entity last transitioned from one status to another.
    • The message field is a human-readable message indicating details about the transition.
    • The reason field is a unique, one-word, CamelCase reason for the condition\u2019s last transition.
    • The status field is a string, with possible values True, False, and Unknown.
    • The type field is a string with the following possible values:
      • Available: the resource has been successfully configured;
    Field Type Description type String Condition Type status String Status: True, False, Unknown reason String Condition state reason message String Condition state description lastTransitionTime Timestamp Last transition timestamp"},{"location":"kuadrant-operator/doc/reference/ratelimitpolicy/#high-level-example","title":"High-level example","text":"
    apiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: my-rate-limit-policy\nspec:\n  # Reference to an existing networking resource to attach the policy to. REQUIRED.\n  # It can be a Gateway API HTTPRoute or Gateway resource.\n  # It can only refer to objects in the same namespace as the RateLimitPolicy.\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute / Gateway\n    name: myroute / mygateway\n\n  # The limits definitions to apply to the network traffic routed through the targeted resource.\n  # Equivalent to if otherwise declared within `defaults`.\n  limits:\n    \"my_limit\":\n      # The rate limits associated with this limit definition. REQUIRED.\n      # E.g., to specify a 50rps rate limit, add `{ limit: 50, duration: 1, unit: secod }`\n      rates: [\u2026]\n\n      # Counter qualifiers.\n      # Each dynamic value in the data plane starts a separate counter, combined with each rate limit.\n      # E.g., to define a separate rate limit for each user name detected by the auth layer, add `metadata.filter_metadata.envoy\\.filters\\.http\\.ext_authz.username`.\n      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n      counters: [\u2026]\n\n      # Additional dynamic conditions to trigger the limit.\n      # Use it for filtering attributes not supported by HTTPRouteRule or with RateLimitPolicies that target a Gateway.\n      # Check out Kuadrant RFC 0002 (https://github.com/Kuadrant/architecture/blob/main/rfcs/0002-well-known-attributes.md) to learn more about the Well-known Attributes that can be used in this field.\n      when: [\u2026]\n\n    # Explicit defaults. 
Used in policies that target a Gateway object to express default rules to be enforced on\n    # routes that lack a more specific policy attached to.\n    # Mutually exclusive with `overrides` and with declaring `limits` at the top-level of the spec.\n    defaults:\n      limits: { \u2026 }\n\n    # Overrides. Used in policies that target a Gateway object to be enforced on all routes linked to the gateway,\n    # thus also overriding any more specific policy occasionally attached to any of those routes.\n    # Mutually exclusive with `defaults` and with declaring `limits` at the top-level of the spec.\n    overrides:\n      limits: { \u2026 }\n
    "},{"location":"kuadrant-operator/doc/reference/tlspolicy/","title":"The TLSPolicy Custom Resource Definition (CRD)","text":"
    • TLSPolicy
    • TLSPolicySpec
    • TLSPolicyStatus
    "},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicy","title":"TLSPolicy","text":"Field Type Required Description spec TLSPolicySpec Yes The specification for TLSPolicy custom resource status TLSPolicyStatus No The status for the custom resource"},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicyspec","title":"TLSPolicySpec","text":"Field Type Required Description targetRef Gateway API LocalPolicyTargetReference Yes Reference to a Kubernetes resource that the policy attaches to issuerRef CertManager meta/v1.ObjectReference Yes IssuerRef is a reference to the issuer for the created certificate commonName String No CommonName is a common name to be used on the created certificate duration Kubernetes meta/v1.Duration No The requested 'duration' (i.e. lifetime) of the created certificate. renewBefore Kubernetes meta/v1.Duration No How long before the currently issued certificate's expiry cert-manager should renew the certificate. usages []CertManager v1.KeyUsage No Usages is the set of x509 usages that are requested for the certificate. Defaults to digital signature and key encipherment if not specified revisionHistoryLimit Number No RevisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history privateKey CertManager meta/v1.CertificatePrivateKey No Options to control private keys used for the Certificate

    IssuerRef certmanmetav1.ObjectReference

    "},{"location":"kuadrant-operator/doc/reference/tlspolicy/#tlspolicystatus","title":"TLSPolicyStatus","text":"Field Type Description observedGeneration String Number of the last observed generation of the resource. Use it to check if the status info is up to date with latest resource spec. conditions []Kubernetes meta/v1.Condition List of conditions that define that status of the resource."},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/","title":"Enforcing authentication & authorization with Kuadrant AuthPolicy","text":"

    This guide walks you through the process of setting up a local Kubernetes cluster with Kuadrant where you will protect Gateway API endpoints by declaring Kuadrant AuthPolicy custom resources.

    Three AuthPolicies will be declared:

    Use case AuthPolicies App developer 2 AuthPolicies targeting a HTTPRoute that routes traffic to a sample \"Toy Store\" application \u2192 enforce API key authentication to all requests in this route; require API key owners to be mapped to groups:admins metadata to access a specific HTTPRouteRule of the route. Platform engineer use-case 1 AuthPolicy targeting the kuadrant-ingressgateway Gateway \u2192 enforces a trivial \"deny-all\" policy that locks down any other HTTPRoute attached to the Gateway.

    Topology:

                                \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                            \u2502        (Gateway)        \u2502   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                            \u2502 kuadrant-ingressgateway \u2502\u25c4\u2500\u2500\u2502 (AuthPolicy)  \u2502\n                            \u2502                         \u2502   \u2502    gw-auth    \u2502\n                            \u2502            *            \u2502   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                            \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                              \u25b2                      \u25b2\n                     \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510   \u2502   (HTTPRoute)    \u2502   \u2502   (HTTPRoute)    \u2502\n\u2502  (AuthPolicy)  \u2502\u2500\u2500\u25ba\u2502    toystore      \u2502   \u2502      other       \u2502\n\u2502 toystore-authn \u2502   \u2502                  \u2502   \u2502                  \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   \u2502 api.toystore.com \u2502   \u2502 *.other-apps.com \u2502\n                     \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518   
\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                      \u25b2                \u25b2\n            \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n            | (HTTPRouteRule) | | (HTTPRouteRule) |   \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n            |     rule-1      | |     rule-2      |\u25c4\u2500\u2500\u2502   (AuthPolicy)  \u2502\n            |                 | |                 |   \u2502 toystore-admins \u2502\n            | - GET /cars*    | | - /admins*      |   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n            | - GET /dolls*   | \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n            \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
    "},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#deploy-the-toy-store-sample-application-persona-app-developer","title":"Deploy the Toy Store sample application (Persona: App developer)","text":"
    kubectl apply -f examples/toystore/toystore.yaml\n\nkubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches: # rule-1\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/cars\"\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/dolls\"\n    backendRefs:\n    - name: toystore\n      port: 80\n  - matches: # rule-2\n    - path:\n        type: PathPrefix\n        value: \"/admin\"\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

    Export the gateway hostname and port:

    export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n

    Send requests to the application unprotected:

    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/dolls -i\n# HTTP/1.1 200 OK\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
    "},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#protect-the-toy-store-application-persona-app-developer","title":"Protect the Toy Store application (Persona: App developer)","text":"

    Create AuthPolicies to enforce the following auth rules:

    • Authentication:
    • All users must present a valid API key
    • Authorization:
    • /admin* paths (2nd rule of the HTTPRoute) require user mapped to the admins group (kuadrant.io/groups=admins annotation added to the Kubernetes API key Secret)
    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-authn\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  defaults:\n    strategy: merge\n    rules:\n      authentication:\n        \"api-key-authn\":\n          apiKey:\n            selector:\n              matchLabels:\n                app: toystore\n          credentials:\n            authorizationHeader:\n              prefix: APIKEY\n---\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-admins\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-2\n  rules:\n    authorization:\n      \"only-admins\":\n        opa:\n          rego: |\n            groups := split(object.get(input.auth.identity.metadata.annotations, \"kuadrant.io/groups\", \"\"), \",\")\n            allow { groups[_] == \"admins\" }\nEOF\n

    Create the API keys (must be created in the same namespace as the Kuadrant CR):

    kubectl apply -n kuadrant-system -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-regular-user\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\nstringData:\n  api_key: iamaregularuser\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-admin-user\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    kuadrant.io/groups: admins\nstringData:\n  api_key: iamanadmin\ntype: Opaque\nEOF\n

    Send requests to the application protected by Kuadrant:

    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-authn\"\n# x-ext-auth-reason: credential not found\n
    curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/cars -i\n# HTTP/1.1 200 OK\n
    curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamaregularuser' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
    curl -H 'Host: api.toystore.com' -H 'Authorization: APIKEY iamanadmin' http://$GATEWAY_URL/admin -i\n# HTTP/1.1 200 OK\n
    "},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#create-a-default-deny-all-policy-at-the-level-of-the-gateway-persona-platform-engineer","title":"Create a default \"deny-all\" policy at the level of the gateway (Persona: Platform engineer)","text":"

    Create the policy:

    kubectl -n gateway-system apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: gw-auth\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: kuadrant-ingressgateway\n  defaults:\n    strategy: atomic\n    rules:\n      authorization:\n        deny-all:\n          opa:\n            rego: \"allow = false\"\n      response:\n        unauthorized:\n          headers:\n            \"content-type\":\n              value: application/json\n          body:\n            value: |\n              {\n                \"error\": \"Forbidden\",\n                \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n              }\nEOF\n

    The policy won't be effective until there is at least one accepted route not yet protected by another more specific policy attached to it.

    Create a route that will inherit the default policy attached to the gateway:

    kubectl apply -f -<<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: other\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - \"*.other-apps.com\"\nEOF\n

    Send requests to the route protected by the default policy set at the level of the gateway:

    curl -H 'Host: foo.other-apps.com' http://$GATEWAY_URL/ -i\n# HTTP/1.1 403 Forbidden\n# content-type: application/json\n# x-ext-auth-reason: Unauthorized\n# [\u2026]\n#\n# {\n#   \"error\": \"Forbidden\",\n#   \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n# }\n
    "},{"location":"kuadrant-operator/doc/user-guides/auth/auth-for-app-devs-and-platform-engineers/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/","title":"Basic DNS","text":""},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#basic-dns-setup","title":"Basic DNS setup","text":"

    The document will cover the most basic DNS setup using the Kuadrant DNSPolicy API. In order to follow this guide, it is expected that you have a cluster setup with the latest version of Kuadrant installed. Also as we are using DNS, it is also important that the Gateways are accessible either via your local network or via the public internet. DNSPolicy will work with any Gateway provider so it is not essential that you have Istio or Envoy Gateway installed, but you do need a Gateway API provider installed. We would recommend using Istio or Envoy Gateway as this will allow you to use some of the other policies provided by Kuadrant.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#gateway-and-httproute-configuration","title":"Gateway and HTTPRoute configuration","text":"

    With a Gateway provider installed, in order to configure DNS via DNSPolicy, you must first configure a Gateway with a listener that uses a specified hostname. You must also have a HTTPRoute resource attached to this gateway listener. Below are some simple examples of these resources (note we are not using a HTTPS listener for simplicity but that will also work):

    ---\nkind: Gateway\napiVersion: gateway.networking.k8s.io/v1\nmetadata:\n  name: external\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - name: http\n      port: 8080\n      hostname: test.example.com\n      protocol: HTTP\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\n  labels:\n    app: toystore\nspec:\n  parentRefs:\n    - name: external\n  hostnames: [\"test.example.com\"]\n  rules:\n    - matches:\n        - path:\n            type: PathPrefix\n            value: \"/toy\"\n          method: GET\n        - path:\n            type: Exact\n            value: \"/admin/toy\"\n          method: POST\n        - path:\n            type: Exact\n            value: \"/admin/toy\"\n          method: DELETE\n      backendRefs:\n        - name: toystore\n          port: 80\n
    With these defined, we are ready to setup DNS via DNSPolicy.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#configure-a-dnsprovider","title":"Configure a DNSProvider","text":"

    The first step is to configure a DNSProvider. This is a simple kubernetes secret with credentials to access the DNS provider. With Kuadrant we support using AWS Route53, Azure and GCP as DNS providers. It is important that this credential has access to write and read to your DNS zones.

    More info on the various DNS Providers

    In this example we will configure an AWS route53 DNS provider:

    kubectl create secret generic aws-credentials \\\n  --namespace=my-gateway-namespace \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=XXXX \\\n  --from-literal=AWS_REGION=eu-west-1 \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=XXX\n

    With this in place we can now define our DNSPolicy resource:

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: basic-dnspolicy\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external\n  providerRefs:\n\n    - name: aws-credentials\n

    This resource also needs to be created in the same namespace as your Gateway and the targetRef needs to reference your gateway. When this is done we can check the status of the DNSPolicy and the Gateway to check when it is ready.

    kubectl wait dnspolicy/basic-dnspolicy -n my-gateway-namespace --for=\"condition=Ready=true\" --timeout=300s\n

    If you look at the gateway status you should also see:

      - lastTransitionTime: \"2024-10-09T11:22:10Z\"\n    message: Object affected by DNSPolicy my-gateway-namespace/basic-dnspolicy\n    observedGeneration: 1\n    reason: Accepted\n    status: \"True\"\n    type: kuadrant.io/DNSPolicyAffected\n

    DNS is now setup for your Gateway. After allowing a little time for the DNS to propagate to the nameservers, you should be able to test the DNS using a dig command; alternatively, you can curl your endpoint.

    dig test.example.com +short\n\ncurl -v test.example.com/toy\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/#important-considerations","title":"Important Considerations","text":"

    With this guide, you have learned how to setup the most basic DNSPolicy. DNSPolicy is also capable of setting up advanced DNS record structure to help balance traffic across multiple gateways. With the most basic policy outlined here, you should not apply it to more than one gateway that shares a listener with the same host name. There is one exception to this rule, which is if all your gateways are using IP addresses rather than hostname addresses; in this case DNSPolicy will merge the IPs into a multi-value response. However, if your Gateways are using hostnames, DNSPolicy will set up a simple CNAME record and as there is only one record and CNAMEs cannot have multiple values by definition, one of the DNSPolicies (the last one to attempt to update the provider) will report an error.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/","title":"DNS Health Checks","text":"

    The DNS health check feature allows you to define a HTTP based health check via the DNSPolicy API that will be executed against targeted gateway listener(s) that have specified non-wildcard hostnames. These health checks will flag a published endpoint as healthy or unhealthy based on the defined configuration. When an endpoint is unhealthy, it will not be published if it has not already been published to the DNS provider; it will only be unpublished if it is part of a multi-value A record; and in all cases its state can be observed via the DNSPolicy status.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#limitations","title":"Limitations","text":"
    • We do not currently support a health check being targeted to a HTTPRoute resource: DNSPolicy can only target Gateways.
    • As mentioned above, when a record has been published using the load balancing options (GEO and Weighting) via DNSPolicy, a failing health check will not remove the endpoint record from the provider, this is to avoid an accidental NX-Domain response. If the policy is not using the load balancing options and results in a multiple value A record, then unhealthy IPs will be removed from this A record unless it would result in an empty value set.
    • Health checks will not be added to listeners that define a wildcard hostname, e.g. (*.example.com), as we currently cannot know which host to use for the health check.
    "},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#configuration-of-health-checks","title":"Configuration of Health Checks","text":"

    To configure a DNS health check, you need to specify the health check section of the DNSPolicy.

    Below are some examples of DNSPolicy with health checks defined:

    1) DNSPolicy with a health check that will be applied to all listeners on a gateway that define a non-wildcard hostname

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: gateway-dns\nspec:\n  healthCheck:\n    failureThreshold: 3\n    interval: 5m\n    path: /health\n  ...\n   targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external  \n

    2) DNSPolicy with a health check that will be applied for a specific listener with a non-wildcard hostname

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: my-listener-dns\nspec:\n  healthCheck:\n    failureThreshold: 3\n    interval: 5m\n    path: /ok #different path for this listener\n  ...\n   targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external  \n    sectionName: my-listener #notice the addition of section name here that must match the listener name\n

    These policies can be combined on a single gateway. The policy with the section name defined will override the gateway policy including the health check.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#sending-additional-headers-with-the-health-check-request","title":"Sending additional headers with the health check request","text":"

    Sometimes, it may be desirable to send additional headers with the health check request, for example an API key or a service account token that can be passed in the request headers.

    To do this you will need to create a secret in the same namespace as the DNSPolicy with the keys and values you wish to send:

    kubectl create secret generic healthheaders --from-literal=token=supersecret -n my-dns-policy-namespace\n

    Next you will need to update the DNSPolicy to add a reference to this secret:

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: my-listener-dns\nspec:\n  healthCheck:\n    additionalHeadersRef: #add the following\n      name: healthheaders\n    failureThreshold: 3\n    interval: 5m\n    path: /ok\n  ...\n   targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external  \n    sectionName: my-listener\n

    The health check requests will now send the key value pairs in the secret as headers when performing a health check request.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#health-check-status","title":"Health Check Status","text":"

    When all health checks based on a DNSPolicy are passing you will see the following status:

        - lastTransitionTime: \"2024-11-14T12:33:13Z\"\n      message: All sub-resources are healthy\n      reason: SubResourcesHealthy\n      status: \"True\"\n      type: SubResourcesHealthy\n

    If one or more of the health checks are failing you will see a status in the DNSPolicy similar to the one shown below:

       - lastTransitionTime: \"2024-11-15T10:40:15Z\"\n      message: 'DNSPolicy has encountered some issues: not all sub-resources of policy\n        are passing the policy defined health check. Not healthy DNSRecords are: external-t1b '\n      reason: Unknown\n      status: \"False\"\n      type: SubResourcesHealthy\n    observedGeneration: 1\n    recordConditions:\n      t1b.cb.hcpapps.net:\n      - lastTransitionTime: \"2024-11-15T10:40:14Z\"\n        message: 'Not healthy addresses: [aeeba26642f1b47d9816297143e2d260-434484576.eu-west-1.elb.amazonaws.com]'\n        observedGeneration: 1\n        reason: HealthChecksFailed\n        status: \"False\"\n        type: Healthy\n

    Finally, you can also take a look at the underlying individual health check status by inspecting the dnshealthcheckprobe resource:

    Note: These resources are for view only interactions as they are controlled by the Kuadrant Operator based on the DNSPolicy API

    kubectl get dnshealthcheckprobes -n my-dns-policy-namespace -o=wide\n

    If you look at the status of one of these you can see additional information:

    status:\n  consecutiveFailures: 3\n  healthy: false\n  observedGeneration: 1\n  reason: 'Status code: 503'\n  status: 503\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/dnshealthchecks/#manually-removing-unhealthy-records","title":"Manually removing unhealthy records","text":"

    If you have a failing health check for one of your gateway listeners and you would like to remove it from the DNS provider, you can do this by deleting the associated DNSRecord resource.

    Finding the correct record

    DNSRecord resources are kept in the same namespace as the DNSPolicy that configured and created them.

    kubectl get dnsrecords.kuadrant.io -n <dns-policy-namespace>\n

    As shown above, when a health check is failing, the DNSPolicy will show a status for that listener host to surface that failure:

    recordConditions:\n    t1a.cb.hcpapps.net:\n\n    - lastTransitionTime: \"2024-11-27T14:00:52Z\"\n      message: 'Not healthy addresses: [ae4d131ee5d7b4fb098f4afabf4aba4c-513237325.us-east-1.elb.amazonaws.com]'\n      observedGeneration: 1\n      reason: HealthChecksFailed\n      status: \"False\"\n      type: Healthy\n

    The DNSRecord resource is named after the gateway and the listener name. So if you have a gateway called ingress and a listener called example you will have a DNSRecord resource named ingress-example in the same namespace as your DNSPolicy. So from this status you can get the hostname and find the associated listener on your gateway. You can then delete the associated DNSRecord resource.

    kubectl delete dnsrecord.kuadrant.io <gateway-name>-<listener-name> -n <dns policy namespace>\n

    Removing this resource will remove all of the associated DNS records in the DNS provider and while the health check is failing, the dns operator will not re-publish these records.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/","title":"Gateway DNS configuration for routes attached to a ingress gateway","text":"

    This user guide walks you through an example of how to configure DNS for all routes attached to an ingress gateway.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#requisites","title":"Requisites","text":"
    • Docker
    • Route53 Hosted Zone
    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    Create a namespace:

    kubectl create namespace my-gateways\n

    Export a root domain and hosted zone id:

    export ROOT_DOMAIN=<ROOT_DOMAIN>\n

    Note: ROOT_DOMAIN should be set to your AWS hosted zone name.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#create-a-dns-provider-secret","title":"Create a dns provider secret","text":"

    Create an AWS provider secret. You should limit the permissions of this credential to only the zones you want Kuadrant to access.

    export AWS_ACCESS_KEY_ID=<AWS_ACCESS_KEY_ID> AWS_SECRET_ACCESS_KEY=<AWS_SECRET_ACCESS_KEY>\n\nkubectl -n my-gateways create secret generic aws-credentials \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"

    Create a gateway using your ROOT_DOMAIN as part of a listener hostname:

    kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: prod-web\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All\n      name: api\n      hostname: \"*.$ROOT_DOMAIN\"\n      port: 80\n      protocol: HTTP\nEOF\n

    Check gateway status:

    kubectl get gateway prod-web -n my-gateways\n

    Response:

    NAME       CLASS   ADDRESS        PROGRAMMED   AGE\nprod-web   istio   172.18.200.1   True         25s\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#enable-dns-on-the-gateway","title":"Enable DNS on the gateway","text":"

    Create a Kuadrant DNSPolicy to configure DNS:

    kubectl -n my-gateways apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: prod-web\nspec:\n  targetRef:\n    name: prod-web\n    group: gateway.networking.k8s.io\n    kind: Gateway\nEOF\n

    Check policy status:

    kubectl get dnspolicy -o wide -n my-gateways\n

    Response:

    NAME       STATUS     TARGETREFKIND   TARGETREFNAME   AGE\nprod-web   Accepted   Gateway         prod-web        26s\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#deploy-a-sample-api-to-test-dns","title":"Deploy a sample API to test DNS","text":"

    Deploy the sample API:

    kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n

    Route traffic to the API from our gateway:

    kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: prod-web\n    namespace: my-gateways\n  hostnames:\n  - \"*.$ROOT_DOMAIN\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

    Verify a DNSRecord resource is created:

    kubectl get dnsrecords -n my-gateways\nNAME           READY\nprod-web-api   True\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#verify-dns-works-by-sending-requests","title":"Verify DNS works by sending requests","text":"

    Verify DNS using dig:

    dig foo.$ROOT_DOMAIN +short\n

    Response:

    172.18.200.1\n

    Verify DNS using curl:

    curl http://api.$ROOT_DOMAIN\n

    Response:

    {\n  \"method\": \"GET\",\n  \"path\": \"/\",\n  \"query_string\": null,\n  \"body\": \"\",\n  \"headers\": {\n    \"HTTP_HOST\": \"api.$ROOT_DOMAIN\",\n    \"HTTP_USER_AGENT\": \"curl/7.85.0\",\n    \"HTTP_ACCEPT\": \"*/*\",\n    \"HTTP_X_FORWARDED_FOR\": \"10.244.0.1\",\n    \"HTTP_X_FORWARDED_PROTO\": \"http\",\n    \"HTTP_X_ENVOY_INTERNAL\": \"true\",\n    \"HTTP_X_REQUEST_ID\": \"9353dd3d-0fe5-4404-86f4-a9732a9c119c\",\n    \"HTTP_X_ENVOY_DECORATOR_OPERATION\": \"toystore.my-gateways.svc.cluster.local:80/*\",\n    \"HTTP_X_ENVOY_PEER_METADATA\": \"ChQKDkFQUF9DT05UQUlORVJTEgIaAAoaCgpDTFVTVEVSX0lEEgwaCkt1YmVybmV0ZXMKHQoMSU5TVEFOQ0VfSVBTEg0aCzEwLjI0NC4wLjIyChkKDUlTVElPX1ZFUlNJT04SCBoGMS4xNy4yCtcBCgZMQUJFTFMSzAEqyQEKIwoVaXN0aW8uaW8vZ2F0ZXdheS1uYW1lEgoaCHByb2Qtd2ViChkKDGlzdGlvLmlvL3JldhIJGgdkZWZhdWx0CjMKH3NlcnZpY2UuaXN0aW8uaW8vY2Fub25pY2FsLW5hbWUSEBoOcHJvZC13ZWItaXN0aW8KLwojc2VydmljZS5pc3Rpby5pby9jYW5vbmljYWwtcmV2aXNpb24SCBoGbGF0ZXN0CiEKF3NpZGVjYXIuaXN0aW8uaW8vaW5qZWN0EgYaBHRydWUKGgoHTUVTSF9JRBIPGg1jbHVzdGVyLmxvY2FsCigKBE5BTUUSIBoecHJvZC13ZWItaXN0aW8tYzU0NWQ4ZjY4LTdjcjg2ChoKCU5BTUVTUEFDRRINGgtteS1nYXRld2F5cwpWCgVPV05FUhJNGktrdWJlcm5ldGVzOi8vYXBpcy9hcHBzL3YxL25hbWVzcGFjZXMvbXktZ2F0ZXdheXMvZGVwbG95bWVudHMvcHJvZC13ZWItaXN0aW8KFwoRUExBVEZPUk1fTUVUQURBVEESAioACiEKDVdPUktMT0FEX05BTUUSEBoOcHJvZC13ZWItaXN0aW8=\",\n    \"HTTP_X_ENVOY_PEER_METADATA_ID\": \"router~10.244.0.22~prod-web-istio-c545d8f68-7cr86.my-gateways~my-gateways.svc.cluster.local\",\n    \"HTTP_X_ENVOY_ATTEMPT_COUNT\": \"1\",\n    \"HTTP_X_B3_TRACEID\": \"d65f580db9c6a50c471cdb534771c61a\",\n    \"HTTP_X_B3_SPANID\": \"471cdb534771c61a\",\n    \"HTTP_X_B3_SAMPLED\": \"0\",\n    \"HTTP_VERSION\": \"HTTP/1.1\"\n  },\n  \"uuid\": \"0ecb9f84-db30-4289-a3b8-e22d4021122f\"\n}\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/gateway-dns/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/","title":"Load Balanced DNS","text":""},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#overview","title":"Overview","text":"

    This document will show you how to setup a load balanced DNS configuration using the [DNSPolicy](https://docs.kuadrant.io/latest/kuadrant-operator/doc/reference/dnspolicy/) API. When we say \"load balanced\", this means we configure the DNS provider (AWS, GCP, etc.) to return different gateway/loadbalancer addresses to queries from DNS clients based on specific weighting and geo location configuration.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#when-should-i-use-a-load-balanced-dns-policy","title":"When should I use a load balanced DNS policy?","text":"

    It is most useful to use the load balancing options when targeting multiple gateways that share a listener host, e.g. (api.example.com). It is also perfectly valid to use it when you only have a single gateway; this provides the benefit of allowing you to easily expand beyond this single gateway for a given shared hostname. It is worth knowing that the load balanced DNSPolicy comes with a relatively small additional cost of some added records and lookups during DNS resolution vs a \"simple\" DNSPolicy with no load balancing specified, as the latter only sets up a simple A or CNAME record. So in summary, if you expect to need multiple gateways for a given listener host then you should take advantage of the load balanced option.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#important-considerations","title":"Important Considerations","text":"
    • When using a DNSPolicy with a load balanced configuration, all DNSPolicies affecting a listener with the same hostname should have load balanced options set. Without the load balanced configuration, Kuadrant's dns controller will try to set up only a simple A or CNAME record.
    • When setting geographic configuration, only ever set one unique GEO as the default GEO across all instances of DNSPolicy targeting a listener with the same hostname. If you set different defaults for a single listener hostname, the dns controllers will constantly attempt to bring the default into the state they each feel is correct.
    • If you want different load balancing options for a particular listener in a gateway, you can target that listener directly with DNSPolicy via the targetRef sectionName property.
    • If you do not use the load balanced configuration, a simple single A or CNAME record is set up. Later if you need to move to load balanced, you will need to delete and recreate your policy.
    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#dns-provider-setup","title":"DNS Provider Setup","text":"

    A DNSPolicy acts against a target Gateway or a target listener within a gateway by processing the hostnames on the targeted listeners. Using these it can create dns records using the address exposed in the Gateway's status block. In order for Kuadrant's DNS component to do this, it must be able to access and know which DNS provider to use. This is done through the creation of a dns provider secret containing the needed credentials and the provider identifier.

    [Learn more about how to setup a DNS Provider](https://docs.kuadrant.io/latest/dns-operator/docs/provider/)

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#loadbalanced-dnspolicy-creation-and-attachment","title":"LoadBalanced DNSPolicy creation and attachment","text":"

    Once an appropriate provider credential is configured, we can now create and attach a DNSPolicy to start managing DNS for the listeners on our Gateway. Below is an example.

    apiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: prod-web\n  namespace: ingress-gateway\nspec:\n  targetRef:\n    name: prod-web\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    sectionName: listenerName \n  providerRefs:\n    - name: my-aws-credentials \n  loadBalancing:\n    weight: 120 \n    geo: GEO-EU \n    defaultGeo: true\n
    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#load-balancing-section","title":"Load Balancing section","text":"

    This section must be filled out and indicates to the dns component that the targets of this policy should be setup to handle more than one gateway. It is required to define values for the weighted and geo options. These values are used for the records created by the policy controller based on the target gateway. To read more detail about each of the fields in the loadBalancing section, take a look at DNS Overview

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#locations-supported-per-dns-provider","title":"Locations supported per DNS provider","text":"Supported AWS GCP Continents Country codes States Regions"},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#continents-and-country-codes-supported-by-aws-route-53","title":"Continents and country codes supported by AWS Route 53","text":"

    :Note: For more information please see the official AWS documentation

    To see all regions supported by AWS Route 53, please see the official [documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values-geo.html). With Route 53 when setting a continent code use a \"GEO-\" prefix otherwise it will be considered a country code.

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#regions-supported-by-gcp-cloud-dns","title":"Regions supported by GCP Cloud DNS","text":"

    To see all regions supported by GCP Cloud DNS, please see the official [documentation](https://cloud.google.com/compute/docs/regions-zones)

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#regions-and-countries-supported-by-azure-cloud-dns","title":"Regions and Countries supported by Azure Cloud DNS","text":"

    To see the different values you can use for the geo based DNS with Azure take a look at the following [documentation](https://learn.microsoft.com/en-us/azure/traffic-manager/traffic-manager-geographic-regions)

    "},{"location":"kuadrant-operator/doc/user-guides/dns/load-balanced-dns/#moving-from-non-load-balanced-to-load-balanced-or-vice-versa","title":"Moving from non load balanced to load balanced or vice versa","text":"

    It is possible to update a DNSPolicy that has no load balancing options set to one that has these options set and vice versa. Underneath, the DNS Operator will remove the existing records and replace them with the correct set of records based on your configuration. It is important however that when using DNSPolicy across multiple Gateways that share a hostname, the DNSPolicies targeting a listener with a shared hostname all use the same load balancing configuration (or all omit it). It is invalid to have two DNSPolicies targeting a listener with a shared hostname that use different dns strategies. Doing so will cause one of the DNSPolicies to fail to be enforced and report an error caused by an inability to bring the DNS records into a consistent state.

    Example:

    If you have gateway1 with listener example with a hostname of example.com and you have a separate gateway gateway2 with the same listener definition as gateway1 (perhaps on a different cluster in a different region), you should ensure that the DNSPolicies targeting these listeners are both using a loadbalanced configuration. Below is an example of valid and invalid configuration.

    Valid Config

    Given a gateway deployed on two different clusters in two different locations:

    # example gateway\nkind: Gateway\napiVersion: gateway.networking.k8s.io/v1\nmetadata:\n  name: api-gateway\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - name: example\n      port: 80\n      hostname: 'api.example.com'\n      protocol: HTTP\n
    # gateway 1\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway1\nspec:\n  loadBalancing:\n    weight: 130\n    geo: GEO-EU\n    defaultGeo: true\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n\n# gateway 2\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway2\nspec:\n  loadBalancing:\n    weight: 130\n    geo: US\n    defaultGeo: false\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n

    Invalid Config

    # gateway 1\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway1\nspec:\n  loadBalancing:\n    weight: 130\n    geo: GEO-EU\n    defaultGeo: true\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n\n# gateway 2\n\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: dnspolicy-gateway2\nspec: #notice no loadbalancing defined\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: api-gateway\n    sectionName: example\n  providerRefs:\n\n    - name: aws-provider-credentials\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/","title":"Secure, protect, and connect APIs with Kuadrant","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#overview","title":"Overview","text":"

    This guide walks you through using Kuadrant to secure, protect, and connect an API exposed by a Gateway (Kubernetes Gateway API) from the personas platform engineer and application developer. For more information on the different personas please see the Gateway API documentation

    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#prerequisites","title":"Prerequisites","text":"
    • Kubernetes cluster with Kuadrant operator installed.
    • kubectl command line tool.
    • AWS/Azure or GCP with DNS capabilities.
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-the-environment-variables","title":"Set the environment variables","text":"

    Set the following environment variables used for convenience in this guide:

    export KUADRANT_GATEWAY_NS=api-gateway # Namespace for the example Gateway\nexport KUADRANT_GATEWAY_NAME=external # Name for the example Gateway\nexport KUADRANT_DEVELOPER_NS=toystore # Namespace for an example toystore app\nexport KUADRANT_AWS_ACCESS_KEY_ID=xxxx # AWS Key ID with access to manage the DNS Zone ID below\nexport KUADRANT_AWS_SECRET_ACCESS_KEY=xxxx # AWS Secret Access Key with access to manage the DNS Zone ID below\nexport KUADRANT_AWS_DNS_PUBLIC_ZONE_ID=xxxx # AWS Route 53 Zone ID for the Gateway\nexport KUADRANT_ZONE_ROOT_DOMAIN=example.com # Root domain associated with the Zone ID above\nexport KUADRANT_CLUSTER_ISSUER_NAME=self-signed # Name for the ClusterIssuer\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-up-a-dns-provider","title":"Set up a DNS Provider","text":"

    The DNS provider declares credentials to access the zone(s) that Kuadrant can use to set up DNS configuration. Ensure that this credential only has access to the zones you want Kuadrant to manage via DNSPolicy

    Create the namespace the Gateway will be deployed in:

    kubectl create ns ${KUADRANT_GATEWAY_NS}\n

    Create the secret credentials in the same namespace as the Gateway - these will be used to configure DNS:

    kubectl -n ${KUADRANT_GATEWAY_NS} create secret generic aws-credentials \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=$KUADRANT_AWS_ACCESS_KEY_ID \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=$KUADRANT_AWS_SECRET_ACCESS_KEY\n

    Before adding a TLS issuer, create the secret credentials in the cert-manager namespace:

    kubectl -n cert-manager create secret generic aws-credentials \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=$KUADRANT_AWS_ACCESS_KEY_ID \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=$KUADRANT_AWS_SECRET_ACCESS_KEY\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-the-toystore-app","title":"Deploy the Toystore app","text":"

    Create the namespace for the Toystore application:

    kubectl create ns ${KUADRANT_DEVELOPER_NS}\n

    Deploy the Toystore app to the developer namespace:

    kubectl apply -f https://raw.githubusercontent.com/Kuadrant/Kuadrant-operator/main/examples/toystore/toystore.yaml -n ${KUADRANT_DEVELOPER_NS}\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#add-a-tls-issuer","title":"Add a TLS issuer","text":"

    To secure communication to the Gateways, define a TLS issuer for TLS certificates.

    Note

    This example uses a self-signed issuer, but you can use any issuer supported by cert-manager (for example, Let's Encrypt).

    kubectl apply -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n  name: ${KUADRANT_CLUSTER_ISSUER_NAME}\nspec:\n  selfSigned: {}\nEOF\n

    Wait for the ClusterIssuer to become ready.

    kubectl wait clusterissuer/${KUADRANT_CLUSTER_ISSUER_NAME} --for=condition=ready=true\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-a-gateway","title":"Deploy a Gateway","text":"
    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}\n  namespace: ${KUADRANT_GATEWAY_NS}\n  labels:\n    kuadrant.io/gateway: \"true\"\nspec:\n    gatewayClassName: istio\n    listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All \n      hostname: \"api.${KUADRANT_ZONE_ROOT_DOMAIN}\"\n      name: api\n      port: 443\n      protocol: HTTPS\n      tls:\n        certificateRefs:\n        - group: \"\"\n          kind: Secret\n          name: api-${KUADRANT_GATEWAY_NAME}-tls\n        mode: Terminate\nEOF\n

    Check the status of the Gateway ensuring the gateway is Accepted and Programmed:

    kubectl get gateway ${KUADRANT_GATEWAY_NAME} -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Programmed\")].message}'\n

    Check the status of the listener, you will see that it is not yet programmed or ready to accept traffic due to bad TLS configuration. This will be fixed in the next step with the TLSPolicy:

    kubectl get gateway ${KUADRANT_GATEWAY_NAME} -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.listeners[0].conditions[?(@.type==\"Programmed\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#secure-and-protect-the-gateway-with-auth-rate-limit-and-dns-policies","title":"Secure and protect the Gateway with Auth, Rate Limit, and DNS policies.","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-the-gateway-tls-policy","title":"Deploy the gateway TLS policy","text":"
    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-tls\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  targetRef:\n    name: ${KUADRANT_GATEWAY_NAME}\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  issuerRef:\n    group: cert-manager.io\n    kind: ClusterIssuer\n    name: ${KUADRANT_CLUSTER_ISSUER_NAME}\nEOF\n

    Check that the TLSPolicy has an Accepted and Enforced status (this may take a few minutes for certain providers, e.g. Let's Encrypt):

    kubectl get tlspolicy ${KUADRANT_GATEWAY_NAME}-tls -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#setup-toystore-application-httproute","title":"Setup Toystore application HTTPRoute","text":"
    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\n  namespace: ${KUADRANT_DEVELOPER_NS}\n  labels:\n    deployment: toystore\n    service: toystore\nspec:\n  parentRefs:\n\n  - name: ${KUADRANT_GATEWAY_NAME}\n    namespace: ${KUADRANT_GATEWAY_NS}\n  hostnames:\n  - \"api.${KUADRANT_ZONE_ROOT_DOMAIN}\"\n  rules:\n  - matches:\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/cars\"\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/health\"    \n    backendRefs:\n    - name: toystore\n      port: 80  \nEOF\n

    While the Gateway is now deployed, it currently has exposed endpoints. The next steps will be defining an AuthPolicy to set up a default 403 response for any unprotected endpoints, as well as a RateLimitPolicy to set up a default unrealistic low global limit to further protect any exposed endpoints.

    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-the-deny-all-gateway-authpolicy","title":"Set the Deny all Gateway AuthPolicy","text":"
    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-auth\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: ${KUADRANT_GATEWAY_NAME}\n  defaults:\n   when:\n\n     - predicate: \"request.path != '/health'\"\n   rules:\n    authorization:\n      deny-all:\n        opa:\n          rego: \"allow = false\"\n    response:\n      unauthorized:\n        headers:\n          \"content-type\":\n            value: application/json\n        body:\n          value: |\n            {\n              \"error\": \"Forbidden\",\n              \"message\": \"Access denied by default by the gateway operator. If you are the administrator of the service, create a specific auth policy for the route.\"\n            }\nEOF\n

    Check that the AuthPolicy has Accepted and Enforced status:

    kubectl get authpolicy ${KUADRANT_GATEWAY_NAME}-auth -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#deploy-the-low-limit-gateway-ratelimitpolicy","title":"Deploy the low-limit Gateway RateLimitPolicy","text":"
    kubectl apply -f  - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-rlp\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: ${KUADRANT_GATEWAY_NAME}\n  defaults:\n    limits:\n      \"low-limit\":\n        rates:\n\n        - limit: 1\n          window: 10s\nEOF\n

    Check that the RateLimitPolicy has Accepted and Enforced status:

    kubectl get ratelimitpolicy ${KUADRANT_GATEWAY_NAME}-rlp -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#create-the-gateway-dnspolicy","title":"Create the Gateway DNSPolicy","text":"
    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: DNSPolicy\nmetadata:\n  name: ${KUADRANT_GATEWAY_NAME}-dnspolicy\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  healthCheck:\n    failureThreshold: 3\n    interval: 1m\n    path: /health\n  loadBalancing:\n    defaultGeo: true\n    geo: GEO-NA\n    weight: 120\n  targetRef:\n    name: ${KUADRANT_GATEWAY_NAME}\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  providerRefs:\n\n  - name: aws-credentials # Secret created earlier\nEOF\n

    Check that the DNSPolicy has been Accepted and Enforced (this may take a few minutes):

    kubectl get dnspolicy ${KUADRANT_GATEWAY_NAME}-dnspolicy -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#dns-health-checks","title":"DNS Health checks","text":"

    DNS health checks have been enabled on the DNSPolicy. These health checks will flag a published endpoint as healthy or unhealthy based on the defined configuration. When an endpoint is unhealthy, it will not be published if it has not already been published to the DNS provider; it will only be unpublished if it is part of a multi-value A record; and in all cases its health can be observed via the DNSPolicy status. For more information see the DNS Health checks documentation

    Check the status of the health checks as follows:

    kubectl get dnspolicy ${KUADRANT_GATEWAY_NAME}-dnspolicy -n ${KUADRANT_GATEWAY_NS} -o=jsonpath='{.status.conditions[?(@.type==\"SubResourcesHealthy\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#test-the-low-limit-and-deny-all-policies","title":"Test the low-limit and deny all policies","text":"
    while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null  \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#override-the-gateways-deny-all-authpolicy","title":"Override the Gateway's deny-all AuthPolicy","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#set-up-api-key-auth-flow","title":"Set up API key auth flow","text":"

    Set up an example API key for the new users:

    export KUADRANT_SYSTEM_NS=$(kubectl get kuadrant -A -o jsonpath=\"{.items[0].metadata.namespace}\")\n
    kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: bob-key\n  namespace: ${KUADRANT_SYSTEM_NS}\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: bob\nstringData:\n  api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: alice-key\n  namespace: ${KUADRANT_SYSTEM_NS}\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: alice\nstringData:\n  api_key: IAMALICE\ntype: Opaque\nEOF\n

    Create a new AuthPolicy in a different namespace that overrides the Deny all created earlier:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-auth\n  namespace: ${KUADRANT_DEVELOPER_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  defaults:\n   when:\n\n     - predicate: \"request.path != '/health'\"  \n   rules:\n    authentication:\n      \"api-key-users\":\n        apiKey:\n          selector:\n            matchLabels:\n              app: toystore\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#override-low-limit-ratelimitpolicy-for-specific-users","title":"Override low-limit RateLimitPolicy for specific users","text":"

    Create a new RateLimitPolicy in a different namespace to override the default RateLimitPolicy created earlier:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore-rlp\n  namespace: ${KUADRANT_DEVELOPER_NS}\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  limits:\n    \"general-user\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      counters:\n      - expression: auth.identity.userid\n      when:\n      - predicate: \"auth.identity.userid != 'bob'\"\n    \"bob-limit\":\n      rates:\n      - limit: 2\n        window: 10s\n      when:\n      - predicate: \"auth.identity.userid == 'bob'\"\nEOF\n

    The RateLimitPolicy should be Accepted and Enforced:

    kubectl get ratelimitpolicy -n ${KUADRANT_DEVELOPER_NS} toystore-rlp -o=jsonpath='{.status.conditions[?(@.type==\"Accepted\")].message}{\"\\n\"}{.status.conditions[?(@.type==\"Enforced\")].message}'\n

    Check the status of the HTTPRoute, which is now affected by the RateLimitPolicy in the same namespace:

    kubectl get httproute toystore -n ${KUADRANT_DEVELOPER_NS} -o=jsonpath='{.status.parents[0].conditions[?(@.type==\"kuadrant.io/RateLimitPolicyAffected\")].message}'\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#test-the-new-rate-limit-and-auth-policy","title":"Test the new Rate limit and Auth policy","text":""},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#send-requests-as-alice","title":"Send requests as Alice:","text":"

    You should see status 200 every second for 5 seconds followed by status 429 every second for 5 seconds

    while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#send-requests-as-bob","title":"Send requests as Bob:","text":"

    You should see status 200 every second for 2 seconds followed by status 429 every second for 8 seconds

    while :; do curl -k --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' \"https://api.$KUADRANT_ZONE_ROOT_DOMAIN/cars\" | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#next-steps","title":"Next Steps","text":"
    • mTLS Configuration To learn more about Kuadrant and see more how to guides, visit Kuadrant documentation
    "},{"location":"kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/#optional","title":"Optional","text":"

    If you have prometheus in your cluster, set up a PodMonitor to configure it to scrape metrics directly from the Gateway pod. This must be done in the namespace where the Gateway is running. This configuration is required for metrics such as istio_requests_total.

    kubectl apply -f - <<EOF\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: istio-proxies-monitor\n  namespace: ${KUADRANT_GATEWAY_NS}\nspec:\n  selector:\n    matchExpressions:\n\n      - key: istio-prometheus-ignore\n        operator: DoesNotExist\n  podMetricsEndpoints:\n    - path: /stats/prometheus\n      interval: 30s\n      relabelings:\n        - action: keep\n          sourceLabels: [\"__meta_kubernetes_pod_container_name\"]\n          regex: \"istio-proxy\"\n        - action: keep\n          sourceLabels:\n            [\"__meta_kubernetes_pod_annotationpresent_prometheus_io_scrape\"]\n        - action: replace\n          regex: (\\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})\n          replacement: \"[\\$2]:\\$1\"\n          sourceLabels:\n            [\n              \"__meta_kubernetes_pod_annotation_prometheus_io_port\",\n              \"__meta_kubernetes_pod_ip\",\n            ]\n          targetLabel: \"__address__\"\n        - action: replace\n          regex: (\\d+);((([0-9]+?)(\\.|$)){4})\n          replacement: \"\\$2:\\$1\"\n          sourceLabels:\n            [\n              \"__meta_kubernetes_pod_annotation_prometheus_io_port\",\n              \"__meta_kubernetes_pod_ip\",\n            ]\n          targetLabel: \"__address__\"\n        - action: labeldrop\n          regex: \"__meta_kubernetes_pod_label_(.+)\"\n        - sourceLabels: [\"__meta_kubernetes_namespace\"]\n          action: replace\n          targetLabel: namespace\n        - sourceLabels: [\"__meta_kubernetes_pod_name\"]\n          action: replace\n          targetLabel: pod_name\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/","title":"Authenticated Rate Limiting for Application developers","text":"

    For more info on the different personas see Gateway API

    This user guide walks you through an example of how to configure authenticated rate limiting for an application using Kuadrant.

    Authenticated rate limiting rate limits the traffic directed to an application based on attributes of the client user, who is authenticated by some authentication method. A few examples of authenticated rate limiting use cases are:

    • User A can send up to 50rps (\"requests per second\"), while User B can send up to 100rps.
    • Each user can send up to 20rpm (\"requests per minute\").
    • Admin users (members of the 'admin' group) can send up to 100rps, while regular users (non-admins) can send up to 20rpm and no more than 5rps.

    In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API exposes an endpoint at GET http://api.toystore.com/toy, to mimic an operation of reading toy records.

    We will define 2 users of the API, which can send requests to the API at different rates, based on their user IDs. The authentication method used is API key.

    User ID Rate limit alice 5rp10s (\"5 requests every 10 seconds\") bob 2rp10s (\"2 requests every 10 seconds\")"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#deploy-the-toy-store-api","title":"Deploy the Toy Store API","text":"

    Create the deployment:

    kubectl apply -f examples/toystore/toystore.yaml\n

    Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:

    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches:\n    - path:\n        type: Exact\n        value: \"/toy\"\n      method: GET\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

    Export the gateway hostname and port:

    export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n

    Verify the route works:

    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n

    Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

    kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#enforce-authentication-on-requests-to-the-toy-store-api","title":"Enforce authentication on requests to the Toy Store API","text":"

    Create a Kuadrant AuthPolicy to configure the authentication:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  rules:\n    authentication:\n      \"api-key-users\":\n        apiKey:\n          selector:\n            matchLabels:\n              app: toystore\n          allNamespaces: true\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n

    Verify the authentication works by sending a request to the Toy Store API without API key:

    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-users\"\n# x-ext-auth-reason: \"credential not found\"\n

    Create API keys for users alice and bob to authenticate:

    Note: Kuadrant stores API keys as Kubernetes Secret resources. User metadata can be stored in the annotations of the resource.

    kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: bob-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: bob\nstringData:\n  api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: alice-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: alice\nstringData:\n  api_key: IAMALICE\ntype: Opaque\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#enforce-authenticated-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce authenticated rate limiting on requests to the Toy Store API","text":"

    Create a Kuadrant RateLimitPolicy to configure rate limiting:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  limits:\n    \"alice-limit\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      when:\n      - predicate: \"auth.identity.userid == 'alice'\"\n    \"bob-limit\":\n      rates:\n      - limit: 2\n        window: 10s\n      when:\n      - predicate: \"auth.identity.userid == 'bob'\"\nEOF\n

    Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

    Verify the rate limiting works by sending requests as Alice and Bob.

    Up to 5 successful (200 OK) requests every 10 seconds allowed for Alice, then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

    Up to 2 successful (200 OK) requests every 10 seconds allowed for Bob, then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/","title":"Authenticated Rate Limiting with JWTs and Kubernetes RBAC","text":"

    This user guide walks you through an example of how to use Kuadrant to protect an application with policies to enforce:

    • authentication based on OpenID Connect (OIDC) ID tokens (signed JWTs), issued by a Keycloak server;
    • alternative authentication method by Kubernetes Service Account tokens;
    • authorization delegated to Kubernetes RBAC system;
    • rate limiting by user ID.

    In this example, we will protect a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request.

    The API listens to requests at the hostnames *.toystore.com, where it exposes the endpoints GET /toy*, POST /admin/toy and DELETE /admin/toy, respectively, to mimic operations of reading, creating, and deleting toy records.

    Any authenticated user/service account can send requests to the Toy Store API, by providing either a valid Keycloak-issued access token or Kubernetes token.

    Privileges to execute the requested operation (read, create or delete) will be granted according to the following RBAC rules, stored in the Kubernetes authorization system:

    Operation Endpoint Required role Read GET /toy* toystore-reader Create POST /admin/toy toystore-writer Delete DELETE /admin/toy toystore-writer

    Each user will be entitled to a maximum of 5rp10s (5 requests every 10 seconds).

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#deploy-the-toystore-example-api","title":"Deploy the Toystore example API:","text":"
    kubectl apply -f examples/toystore/toystore.yaml\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#api-lifecycle","title":"API lifecycle","text":""},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-unprotected","title":"Try the API unprotected","text":"

    Export the gateway hostname and port:

    export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n

    It should return 200 OK.

    Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

    kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#deploy-keycloak","title":"Deploy Keycloak","text":"

    Create the namespace:

    kubectl create namespace keycloak\n

    Deploy Keycloak with a bootstrap realm, users, and clients:

    kubectl apply -n keycloak -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    Note: The Keycloak server may take a couple of minutes to be ready.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#enforce-authentication-and-authorization-for-the-toy-store-api","title":"Enforce authentication and authorization for the Toy Store API","text":"

    Create a Kuadrant AuthPolicy to configure authentication and authorization:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: AuthPolicy\nmetadata:\n  name: toystore-protection\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  rules:\n    authentication:\n      \"keycloak-users\":\n        jwt:\n          issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n      \"k8s-service-accounts\":\n        kubernetesTokenReview:\n          audiences:\n\n          - https://kubernetes.default.svc.cluster.local\n        overrides:\n          \"sub\":\n            selector: auth.identity.user.username\n    authorization:\n      \"k8s-rbac\":\n        kubernetesSubjectAccessReview:\n          user:\n            selector: auth.identity.sub\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.sub\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-missing-authentication","title":"Try the API missing authentication","text":"
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-users\"\n# www-authenticate: Bearer realm=\"k8s-service-accounts\"\n# x-ext-auth-reason: {\"k8s-service-accounts\":\"credential not found\",\"keycloak-users\":\"credential not found\"}\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-without-permission","title":"Try the API without permission","text":"

    Obtain an access token with the Keycloak server:

    ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

    Send a request to the API as the Keycloak-authenticated user while still missing permissions:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n

    Create a Kubernetes Service Account to represent a consumer of the API associated with the alternative source of identities k8s-service-accounts:

    kubectl apply -f - <<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: client-app-1\nEOF\n

    Obtain an access token for the client-app-1 service account:

    SA_TOKEN=$(kubectl create token client-app-1)\n

    Send a request to the API as the service account while still missing permissions:

    curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#grant-access-to-the-toy-store-api-for-user-and-service-account","title":"Grant access to the Toy Store API for user and service account","text":"

    Create the toystore-reader and toystore-writer roles:

    kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: toystore-reader\nrules:\n\n- nonResourceURLs: [\"/toy*\"]\n  verbs: [\"get\"]\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: toystore-writer\nrules:\n- nonResourceURLs: [\"/admin/toy\"]\n  verbs: [\"post\", \"delete\"]\nEOF\n

    Add permissions to the user and service account:

    User Kind Roles john User registered in Keycloak toystore-reader, toystore-writer client-app-1 Kubernetes Service Account toystore-reader
    kubectl apply -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: toystore-readers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: toystore-reader\nsubjects:\n\n- kind: User\n  name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\n- kind: ServiceAccount\n  name: client-app-1\n  namespace: default\n---\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: toystore-writers\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: toystore-writer\nsubjects:\n- kind: User\n  name: $(jq -R -r 'split(\".\") | .[1] | @base64d | fromjson | .sub' <<< \"$ACCESS_TOKEN\")\nEOF\n
    Q: Can I use Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings?

    Yes, you can.

    The example above is for non-resource URL Kubernetes roles. For using Roles and RoleBindings instead of ClusterRoles and ClusterRoleBindings, thus more flexible resource-based permissions to protect the API, see the spec for Kubernetes SubjectAccessReview authorization in the Authorino docs.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-with-permission","title":"Try the API with permission","text":"

    Send requests to the API as the Keycloak-authenticated user:

    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
    curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 200 OK\n

    Send requests to the API as the Kubernetes service account:

    curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
    curl -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' -X POST http://$GATEWAY_URL/admin/toy -i\n# HTTP/1.1 403 Forbidden\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce rate limiting on requests to the Toy Store API","text":"

    Create a Kuadrant RateLimitPolicy to configure rate limiting:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  limits:\n    \"per-user\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      counters:\n      - expression: auth.identity.userid\nEOF\n

    Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#try-the-api-rate-limited","title":"Try the API rate limited","text":"

    Each user should be entitled to a maximum of 5 requests every 10 seconds.

    Note: If the tokens have expired, you may need to refresh them first.

    Send requests as the Keycloak-authenticated user:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $ACCESS_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

    Send requests as the Kubernetes service account:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H \"Authorization: Bearer $SA_TOKEN\" -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-with-jwt-and-k8s-authnz/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/","title":"Gateway Rate Limiting for Cluster Operators","text":"

    For more info on the different personas see Gateway API

    This user guide walks you through an example of how to configure rate limiting for all routes attached to a specific ingress gateway.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#deploy-the-toystore-example-api","title":"Deploy the Toystore example API:","text":"
    kubectl apply -f examples/toystore/toystore.yaml\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#create-the-ingress-gateways","title":"Create the ingress gateways","text":"
    kubectl -n gateway-system apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: external\n  annotations:\n    kuadrant.io/namespace: kuadrant-system\n    networking.istio.io/service-type: ClusterIP\nspec:\n  gatewayClassName: istio\n  listeners:\n\n  - name: external\n    port: 80\n    protocol: HTTP\n    hostname: '*.io'\n    allowedRoutes:\n      namespaces:\n        from: All\n---\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: internal\n  annotations:\n    kuadrant.io/namespace: kuadrant-system\n    networking.istio.io/service-type: ClusterIP\nspec:\n  gatewayClassName: istio\n  listeners:\n  - name: local\n    port: 80\n    protocol: HTTP\n    hostname: '*.local'\n    allowedRoutes:\n      namespaces:\n        from: All\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#enforce-rate-limiting-on-requests-incoming-through-the-external-gateway","title":"Enforce rate limiting on requests incoming through the external gateway","text":"
        \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n    \u2502 (Gateway) \u2502      \u2502 (Gateway) \u2502\n    \u2502  external \u2502      \u2502  internal \u2502\n    \u2502           \u2502      \u2502           \u2502\n    \u2502   *.io    \u2502      \u2502  *.local  \u2502\n    \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n          \u25b2\n          \u2502\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u2502 (RateLimitPolicy) \u2502\n\u2502       gw-rlp      \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n

    Create a Kuadrant RateLimitPolicy to configure rate limiting:

    kubectl apply -n gateway-system -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: gw-rlp\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: external\n  limits:\n    \"global\":\n      rates:\n\n      - limit: 5\n        window: 10s\nEOF\n

    Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#deploy-a-sample-api-to-test-rate-limiting-enforced-at-the-level-of-the-gateway","title":"Deploy a sample API to test rate limiting enforced at the level of the gateway","text":"
                               \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510      \u2502 (Gateway) \u2502      \u2502 (Gateway) \u2502\n\u2502 (RateLimitPolicy) \u2502      \u2502  external \u2502      \u2502  internal \u2502\n\u2502       gw-rlp      \u251c\u2500\u2500\u2500\u2500\u2500\u25ba\u2502           \u2502      \u2502           \u2502\n\u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518      \u2502   *.io    \u2502      \u2502  *.local  \u2502\n                           \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518      \u2514\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2518\n                                 \u2502                  \u2502\n                                 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                           \u2502\n                                 \u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                                 \u2502   (HTTPRoute)    \u2502\n                                 \u2502     toystore     \u2502\n                                 \u2502                  \u2502\n                                 \u2502 *.toystore.io    \u2502\n                                 \u2502 *.toystore.local \u2502\n                                 \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252c\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n                                          \u2502\n                                   
\u250c\u2500\u2500\u2500\u2500\u2500\u2500\u2534\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510\n                                   \u2502   (Service)  \u2502\n                                   \u2502   toystore   \u2502\n                                   \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#route-traffic-to-the-api-from-both-gateways","title":"Route traffic to the API from both gateways:","text":"
    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: external\n    namespace: gateway-system\n  - name: internal\n    namespace: gateway-system\n  hostnames:\n  - \"*.toystore.io\"\n  - \"*.toystore.local\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#verify-the-rate-limiting-works-by-sending-requests-in-a-loop","title":"Verify the rate limiting works by sending requests in a loop","text":"

    Expose the gateways, respectively at the port numbers 9081 and 9082 of the local host:

    kubectl port-forward -n gateway-system service/external-istio 9081:80 >/dev/null 2>&1 &\nkubectl port-forward -n gateway-system service/internal-istio 9082:80 >/dev/null 2>&1 &\n

    Up to 5 successful (200 OK) requests every 10 seconds through the external ingress gateway (*.io), then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.io' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

    Unlimited successful (200 OK) through the internal ingress gateway (*.local):

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.local' http://localhost:9082 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/gateway-rl-for-cluster-operators/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/","title":"Gateway Rate Limiting","text":"

    This user guide walks you through an example of how to configure multiple rate limit policies for different listeners in an ingress gateway.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#deploy-the-sample-api","title":"Deploy the sample API:","text":"
    kubectl apply -f examples/toystore/toystore.yaml\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#create-the-ingress-gateways","title":"Create the ingress gateways","text":"
    kubectl -n kuadrant-system apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: environment\n  annotations:\n    kuadrant.io/namespace: kuadrant-system\n    networking.istio.io/service-type: ClusterIP\nspec:\n  gatewayClassName: istio\n  listeners:\n\n  - name: external\n    port: 80\n    protocol: HTTP\n    hostname: '*.io'\n    allowedRoutes:\n      namespaces:\n        from: All\n  - name: local\n    port: 80\n    protocol: HTTP\n    hostname: '*.local'\n    allowedRoutes:\n      namespaces:\n        from: All\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#route-traffic-to-the-api-from-both-gateways-listeners","title":"Route traffic to the API from both gateways listeners","text":"
    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: environment\n    namespace: kuadrant-system\n  hostnames:\n  - \"*.toystore.io\"\n  - \"*.toystore.local\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#create-a-kuadrant-ratelimitpolicy-to-configure-rate-limiting-for-the-external-listener","title":"Create a Kuadrant RateLimitPolicy to configure rate limiting for the external listener:","text":"
    kubectl apply -n kuadrant-system -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: gw-rlp-external\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: environment\n    sectionName: external\n  defaults:\n    strategy: merge\n    limits:\n      \"external\":\n        rates:\n\n        - limit: 2\n          window: 10s\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#create-a-kuadrant-ratelimitpolicy-to-configure-rate-limiting-for-the-local-listener","title":"Create a Kuadrant RateLimitPolicy to configure rate limiting for the local listener:","text":"
    kubectl apply -n kuadrant-system -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: gw-rlp-local\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: Gateway\n    name: environment\n    sectionName: local\n  defaults:\n    strategy: merge\n    limits:\n      \"local\":\n        rates:\n\n        - limit: 5\n          window: 10s\nEOF\n

    Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#verify-the-rate-limiting-works-by-sending-requests-in-a-loop","title":"Verify the rate limiting works by sending requests in a loop","text":"

    Expose the gateway at the port number 9081 of the local host:

    kubectl port-forward -n gateway-system service/environment-istio 9081:80 >/dev/null 2>&1 &\n

    Up to 2 successful (200 OK) requests every 10 seconds through the external listener (*.io), then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.io' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

    Up to 5 successful (200 OK) requests every 10 seconds through the local listener (*.local), then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.local' http://localhost:9081 | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-diff-section/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/","title":"Multi authenticated Rate Limiting for an Application","text":"

    This user guide walks you through an example of how to configure multiple authenticated rate limiting for an application using Kuadrant.

    Authenticated rate limiting, rate limits the traffic directed to an application based on attributes of the client user, who is authenticated by some authentication method. A few examples of authenticated rate limiting use cases are:

    • User A can send up to 50rps (\"requests per second\"), while User B can send up to 100rps.
    • Each user can send up to 20rpm ("requests per minute").
    • Admin users (members of the 'admin' group) can send up to 100rps, while regular users (non-admins) can send up to 20rpm and no more than 5rps.

    In this guide, we will rate limit a sample REST API called Toy Store, an echo service that echoes back to the user whatever attributes it gets in the request. The API exposes an endpoint at GET http://api.toystore.com/toy, to mimic an operation of reading toy records.

    We will define 2 users of the API, which can send requests to the API at different rates, based on their user IDs. The authentication method used is API key.

    User ID Rate limit alice 5rp10s (\"5 requests every 10 seconds\") bob 2rp10s (\"2 requests every 10 seconds\")"},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#deploy-the-toy-store-api","title":"Deploy the Toy Store API","text":"
    kubectl apply -f examples/toystore/toystore.yaml\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#create-a-httproute-to-route-traffic-to-the-service-via-istio-ingress-gateway","title":"Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:","text":"
    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches:\n    - path:\n        type: Exact\n        value: \"/toy\"\n      method: GET\n    - path:\n        type: Exact\n        value: \"/car\"\n      method: GET\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#export-the-gateway-hostname-and-port","title":"Export the gateway hostname and port:","text":"
    export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#verify-the-route-works","title":"Verify the route works:","text":"
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n

    Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

    kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 200 OK\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#enforce-authentication-on-requests-to-the-toy-store-api","title":"Enforce authentication on requests to the Toy Store API","text":"

    Create a Kuadrant AuthPolicy to configure the authentication:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: AuthPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n  rules:\n    authentication:\n      \"api-key-users\":\n        apiKey:\n          selector:\n            matchLabels:\n              app: toystore\n          allNamespaces: true\n        credentials:\n          authorizationHeader:\n            prefix: APIKEY\n    response:\n      success:\n        filters:\n          \"identity\":\n            json:\n              properties:\n                \"userid\":\n                  selector: auth.identity.metadata.annotations.secret\\.kuadrant\\.io/user-id\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#verify-the-authentication-works-by-sending-a-request-to-the-toy-store-api-without-api-key","title":"Verify the authentication works by sending a request to the Toy Store API without API key:","text":"
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-key-users\"\n# x-ext-auth-reason: \"credential not found\"\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#create-api-keys-for-users-alice-and-bob-to-authenticate","title":"Create API keys for users alice and bob to authenticate:","text":"

    Note: Kuadrant stores API keys as Kubernetes Secret resources. User metadata can be stored in the annotations of the resource.

    kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: bob-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: bob\nstringData:\n  api_key: IAMBOB\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: alice-key\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: toystore\n  annotations:\n    secret.kuadrant.io/user-id: alice\nstringData:\n  api_key: IAMALICE\ntype: Opaque\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#enforce-authenticated-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce authenticated rate limiting on requests to the Toy Store API","text":"

    Create Kuadrant RateLimitPolicies to configure rate limiting for Bob and Alice:

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#bob","title":"Bob","text":"
    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-1\n  defaults:\n    strategy: merge\n    limits:\n      \"bob-limit\":\n        rates:\n\n        - limit: 2\n          window: 10s\n        when:\n        - predicate: \"auth.identity.userid == 'bob'\"\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#alice","title":"Alice","text":"
    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1beta3\nkind: RateLimitPolicy\nmetadata:\n  name: toystore-alice\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-1\n  defaults:\n    strategy: merge\n    limits:\n      \"alice-limit\":\n        rates:\n\n        - limit: 5\n          window: 10s\n        when:\n        - predicate: \"auth.identity.userid == 'alice'\"\nEOF\n

    Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

    Verify the rate limiting works by sending requests as Alice and Bob.

    Up to 5 successful (200 OK) requests every 10 seconds allowed for Alice, then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMALICE' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

    Up to 2 successful (200 OK) requests every 10 seconds allowed for Bob, then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Authorization: APIKEY IAMBOB' -H 'Host: api.toystore.com' http://$GATEWAY_URL/toy | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/multi-auth-rlp-same-section/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/","title":"Simple Rate Limiting for Application developers","text":"

    For more info on the different personas see Gateway API

    This user guide walks you through an example of how to configure rate limiting for an endpoint of an application using Kuadrant.

    In this guide, we will rate limit a sample REST API called Toy Store. In reality, this API is just an echo service that echoes back to the user whatever attributes it gets in the request. The API listens to requests at the hostname api.toystore.com, where it exposes the endpoints GET /toys* and POST /toys, respectively, to mimic operations of reading and writing toy records.

    We will rate limit the POST /toys endpoint to a maximum of 5rp10s (\"5 requests every 10 seconds\").

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#setup-the-environment","title":"Setup the environment","text":"

    Follow this setup doc to set up your environment before continuing with this doc.

    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#deploy-the-toy-store-api","title":"Deploy the Toy Store API","text":"

    Create the deployment:

    kubectl apply -f examples/toystore/toystore.yaml\n

    Create a HTTPRoute to route traffic to the service via Istio Ingress Gateway:

    kubectl apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: kuadrant-ingressgateway\n    namespace: gateway-system\n  hostnames:\n  - api.toystore.com\n  rules:\n  - matches:\n    - method: GET\n      path:\n        type: PathPrefix\n        value: \"/toys\"\n    backendRefs:\n    - name: toystore\n      port: 80\n  - matches: # it has to be a separate HTTPRouteRule so we do not rate limit other endpoints\n    - method: POST\n      path:\n        type: Exact\n        value: \"/toys\"\n    backendRefs:\n    - name: toystore\n      port: 80\nEOF\n

    Export the gateway hostname and port:

    export INGRESS_HOST=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.status.addresses[0].value}')\nexport INGRESS_PORT=$(kubectl get gtw kuadrant-ingressgateway -n gateway-system -o jsonpath='{.spec.listeners[?(@.name==\"http\")].port}')\nexport GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT\n

    Verify the route works:

    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n

    Note: If the command above fails to hit the Toy Store API on your environment, try forwarding requests to the service and accessing over localhost:

    kubectl port-forward -n gateway-system service/kuadrant-ingressgateway-istio 9080:80 >/dev/null 2>&1 &\nexport GATEWAY_URL=localhost:9080\n
    curl -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -i\n# HTTP/1.1 200 OK\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#enforce-rate-limiting-on-requests-to-the-toy-store-api","title":"Enforce rate limiting on requests to the Toy Store API","text":"

    Create a Kuadrant RateLimitPolicy to configure rate limiting:

    kubectl apply -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: RateLimitPolicy\nmetadata:\n  name: toystore\nspec:\n  targetRef:\n    group: gateway.networking.k8s.io\n    kind: HTTPRoute\n    name: toystore\n    sectionName: rule-2\n  limits:\n    \"create-toy\":\n      rates:\n\n      - limit: 5\n        window: 10s\n      when:\n      - predicate: \"request.method == 'POST'\"\nEOF\n

    Note: It may take a couple of minutes for the RateLimitPolicy to be applied depending on your cluster.

    Verify the rate limiting works by sending requests in a loop.

    Up to 5 successful (200 OK) requests every 10 seconds to POST /toys, then 429 Too Many Requests:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys -X POST | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n

    Unlimited successful (200 OK) to GET /toys:

    while :; do curl --write-out '%{http_code}\\n' --silent --output /dev/null -H 'Host: api.toystore.com' http://$GATEWAY_URL/toys | grep -E --color \"\\b(429)\\b|$\"; sleep 1; done\n
    "},{"location":"kuadrant-operator/doc/user-guides/ratelimiting/simple-rl-for-app-developers/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/","title":"Gateway TLS for Cluster Operators","text":"

    This user guide walks you through an example of how to configure TLS for all routes attached to an ingress gateway.

    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#requisites","title":"Requisites","text":"
    • Docker
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#setup","title":"Setup","text":"

    This step uses tooling from the Kuadrant Operator component to create a containerized Kubernetes server locally using Kind, where it installs Istio, Kubernetes Gateway API, CertManager and Kuadrant itself.

    Clone the project:

    git clone https://github.com/Kuadrant/kuadrant-operator && cd kuadrant-operator\n

    Setup the environment:

    make local-setup\n

    Create a namespace:

    kubectl create namespace my-gateways\n
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#create-an-ingress-gateway","title":"Create an ingress gateway","text":"

    Create a gateway:

    kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: Gateway\nmetadata:\n  name: prod-web\nspec:\n  gatewayClassName: istio\n  listeners:\n\n    - allowedRoutes:\n        namespaces:\n          from: All\n      name: api\n      hostname: \"*.toystore.local\"\n      port: 443\n      protocol: HTTPS\n      tls:\n        mode: Terminate\n        certificateRefs:\n          - name: toystore-local-tls\n            kind: Secret\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#enable-tls-on-the-gateway","title":"Enable TLS on the gateway","text":"

    The TLSPolicy requires a reference to an existing CertManager Issuer.

    Create a CertManager Issuer:

    kubectl apply -n my-gateways -f - <<EOF\napiVersion: cert-manager.io/v1\nkind: Issuer\nmetadata:\n  name: selfsigned-issuer\nspec:\n  selfSigned: {}\nEOF\n

    Note: We are using a self-signed issuer here, but any supported CertManager issuer or cluster issuer can be used.

    kubectl get issuer selfsigned-issuer -n my-gateways\n

    Response:

    NAME                        READY   AGE\nselfsigned-issuer   True    18s\n

    Create a Kuadrant TLSPolicy to configure TLS:

    kubectl apply -n my-gateways -f - <<EOF\napiVersion: kuadrant.io/v1\nkind: TLSPolicy\nmetadata:\n  name: prod-web\nspec:\n  targetRef:\n    name: prod-web\n    group: gateway.networking.k8s.io\n    kind: Gateway\n  issuerRef:\n    group: cert-manager.io\n    kind: Issuer\n    name: selfsigned-issuer\nEOF\n

    Check policy status:

    kubectl get tlspolicy -o wide -n my-gateways\n

    Response:

    NAME       STATUS     TARGETREFKIND   TARGETREFNAME   AGE\nprod-web   Accepted   Gateway         prod-web        13s\n

    Check a Certificate resource was created:

    kubectl get certificates -n my-gateways\n

    Response:

    NAME                 READY   SECRET               AGE\ntoystore-local-tls   True    toystore-local-tls   7m30s\n

    Check a TLS Secret resource was created:

    kubectl get secrets -n my-gateways --field-selector=\"type=kubernetes.io/tls\"\n

    Response:

    NAME                 TYPE                DATA   AGE\ntoystore-local-tls   kubernetes.io/tls   3      7m42s\n
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#deploy-a-sample-api-to-test-tls","title":"Deploy a sample API to test TLS","text":"

    Deploy the sample API:

    kubectl -n my-gateways apply -f examples/toystore/toystore.yaml\nkubectl -n my-gateways wait --for=condition=Available deployments toystore --timeout=60s\n

    Route traffic to the API from our gateway:

    kubectl -n my-gateways apply -f - <<EOF\napiVersion: gateway.networking.k8s.io/v1\nkind: HTTPRoute\nmetadata:\n  name: toystore\nspec:\n  parentRefs:\n\n  - name: prod-web\n    namespace: my-gateways\n  hostnames:\n  - \"*.toystore.local\"\n  rules:\n  - backendRefs:\n    - name: toystore\n      port: 80\nEOF\n
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#verify-tls-works-by-sending-requests","title":"Verify TLS works by sending requests","text":"

    Get the gateway address:

    GWADDRESS=`kubectl get gateway/prod-web -n my-gateways -o=jsonpath='{.status.addresses[?(@.type==\"IPAddress\")].value}'`\necho $GWADDRESS\n

    Response:

    172.18.200.1\n

    Verify we can access the service via TLS:

    curl -vkI https://api.toystore.local --resolve \"api.toystore.local:443:$GWADDRESS\"\n

    Response:

    * Added api.toystore.local:443:172.18.200.1 to DNS cache\n* Hostname api.toystore.local was found in DNS cache\n*   Trying 172.18.200.1:443...\n* Connected to api.toystore.local (172.18.200.1) port 443 (#0)\n* ALPN: offers h2\n* ALPN: offers http/1.1\n* TLSv1.0 (OUT), TLS header, Certificate Status (22):\n* TLSv1.3 (OUT), TLS handshake, Client hello (1):\n* TLSv1.2 (IN), TLS header, Certificate Status (22):\n* TLSv1.3 (IN), TLS handshake, Server hello (2):\n* TLSv1.2 (IN), TLS header, Finished (20):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):\n* TLSv1.3 (IN), TLS handshake, Certificate (11):\n* TLSv1.3 (IN), TLS handshake, CERT verify (15):\n* TLSv1.3 (IN), TLS handshake, Finished (20):\n* TLSv1.2 (OUT), TLS header, Finished (20):\n* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.3 (OUT), TLS handshake, Finished (20):\n* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384\n* ALPN: server accepted h2\n* Server certificate:\n*  subject: [NONE]\n*  start date: Feb 15 11:46:50 2024 GMT\n*  expire date: May 15 11:46:50 2024 GMT\n* Using HTTP2, server supports multiplexing\n* Copying HTTP/2 data in stream buffer to connection buffer after upgrade: len=0\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* h2h3 [:method: HEAD]\n* h2h3 [:path: /]\n* h2h3 [:scheme: https]\n* h2h3 [:authority: api.toystore.local]\n* h2h3 [user-agent: curl/7.85.0]\n* h2h3 [accept: */*]\n* Using Stream ID: 1 (easy handle 0x5623e4fe5bf0)\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n> HEAD / HTTP/2\n> Host: api.toystore.local\n> user-agent: curl/7.85.0\n> accept: */*\n>\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):\n* TLSv1.3 (IN), TLS handshake, Newsession Ticket 
(4):\n* old SSL session ID is stale, removing\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n* Connection state changed (MAX_CONCURRENT_STREAMS == 2147483647)!\n* TLSv1.2 (OUT), TLS header, Supplemental data (23):\n* TLSv1.2 (IN), TLS header, Supplemental data (23):\n< HTTP/2 200\nHTTP/2 200\n< content-type: application/json\ncontent-type: application/json\n< server: istio-envoy\nserver: istio-envoy\n< date: Thu, 15 Feb 2024 12:13:27 GMT\ndate: Thu, 15 Feb 2024 12:13:27 GMT\n< content-length: 1658\ncontent-length: 1658\n< x-envoy-upstream-service-time: 1\nx-envoy-upstream-service-time: 1\n\n<\n\n* Connection #0 to host api.toystore.local left intact\n
    "},{"location":"kuadrant-operator/doc/user-guides/tls/gateway-tls/#cleanup","title":"Cleanup","text":"
    make local-cleanup\n
    "},{"location":"authorino/docs/","title":"Documentation","text":""},{"location":"authorino/docs/#getting-started","title":"Getting started","text":""},{"location":"authorino/docs/#terminology","title":"Terminology","text":""},{"location":"authorino/docs/#architecture","title":"Architecture","text":""},{"location":"authorino/docs/#feature-description","title":"Feature description","text":""},{"location":"authorino/docs/#user-guides","title":"User guides","text":""},{"location":"authorino/docs/#developers-guide","title":"Developer\u2019s guide","text":""},{"location":"authorino/docs/architecture/","title":"Architecture","text":""},{"location":"authorino/docs/architecture/#overview","title":"Overview","text":"

    There are a few concepts to understand Authorino's architecture. The main components are: Authorino, Envoy and the Upstream service to be protected. Envoy proxies requests to the configured virtual host upstream service, first contacting with Authorino to decide on authN/authZ.

    The topology can vary from centralized proxy and centralized authorization service, to dedicated sidecars, with the nuances in between. Read more about the topologies in the Topologies section below.

    Authorino is deployed using the Authorino Operator, from an Authorino Kubernetes custom resource. Then, from another kind of custom resource, the AuthConfig CRs, each Authorino instance reads and adds to the index the exact rules of authN/authZ to enforce for each protected host (\"index reconciliation\").

    Everything that the AuthConfig reconciler can fetch in reconciliation-time is stored in the index. This is the case of static parameters such as signing keys, authentication secrets and authorization policies from external policy registries.

    AuthConfigs can refer to identity providers (IdP) and trusted auth servers whose access tokens will be accepted to authenticate to the protected host. Consumers obtain an authentication token (short-lived access token or long-lived API key) and send those in the requests to the protected service.

    When Authorino is triggered by Envoy via the gRPC interface, it starts evaluating the Auth Pipeline, i.e. it applies to the request the parameters to verify the identity and to enforce authorization, as found in the index for the requested host (See host lookup for details).

    Apart from static rules, these parameters can include instructions to contact online with external identity verifiers, external sources of metadata and policy decision points (PDPs).

    On every request, Authorino's \"working memory\" is called Authorization JSON, a data structure that holds information about the context (the HTTP request) and objects from each phase of the auth pipeline: i.e., authentication verification (phase i), ad-hoc metadata fetching (phase ii), authorization policy enforcement (phase iii), dynamic response (phase iv), and callbacks (phase v). The evaluators in each of these phases can both read and write from the Authorization JSON for dynamic steps and decisions of authN/authZ.

    "},{"location":"authorino/docs/architecture/#topologies","title":"Topologies","text":"

    Typically, upstream APIs are deployed to the same Kubernetes cluster and namespace where the Envoy proxy and Authorino is running (although not necessarily). Whatever is the case, Envoy must be proxying to the upstream API (see Envoy's HTTP route components and virtual hosts) and pointing to Authorino in the external authorization filter.

    This can be achieved with different topologies:

    • Envoy can be a centralized gateway with one dedicated instance of Authorino, proxying to one or more upstream services
    • Envoy can be deployed as a sidecar of each protected service, but still contacting from a centralized Authorino authorization service
    • Both Envoy and Authorino deployed as sidecars of the protected service, restricting all communication between them to localhost

    Each topology above induces different measures for security.

    "},{"location":"authorino/docs/architecture/#centralized-gateway","title":"Centralized gateway","text":"

    In this topology, it is recommended that the protected services validate the origin of the traffic: it must have been proxied by Envoy. See Authorino JSON injection for an extra validation option using a shared secret passed in an HTTP header.

    "},{"location":"authorino/docs/architecture/#centralized-authorization-service","title":"Centralized authorization service","text":"

    Protected service should only listen on localhost and all traffic can be considered safe.

    "},{"location":"authorino/docs/architecture/#sidecars","title":"Sidecars","text":"

    Namespaced instances of Authorino with fine-grained label selectors are recommended, to avoid unnecessary caching of AuthConfigs.

    Apart from that, protected service should only listen on localhost and all traffic can be considered safe.

    "},{"location":"authorino/docs/architecture/#cluster-wide-vs-namespaced-instances","title":"Cluster-wide vs. Namespaced instances","text":"

    Authorino instances can run in either cluster-wide or namespaced mode.

    Namespace-scoped instances only watch resources (AuthConfigs and Secrets) created in a given namespace. This deployment mode does not require admin privileges over the Kubernetes cluster to deploy the instance of the service (given Authorino's CRDs have been installed beforehand, such as when Authorino is installed using the Authorino Operator).

    Cluster-wide deployment mode, by contrast, deploys instances of Authorino that watch resources across the entire cluster, consolidating all resources into a multi-namespace index of auth configs. Admin privileges over the Kubernetes cluster are required to deploy Authorino in cluster-wide mode.

    Be careful to avoid superposition when combining multiple Authorino instances and instance modes in the same Kubernetes cluster. Apart from caching unnecessary auth config data in the instances depending on your routing settings, the leaders of each instance (set of replicas) may compete for updating the status of the custom resources that are reconciled. See Resource reconciliation and status update for more information.

    If necessary, use label selectors to narrow down the space of resources watched and reconciled by each Authorino instance. Check out the Sharding section below for details.

    "},{"location":"authorino/docs/architecture/#the-authorino-authconfig-custom-resource-definition-crd","title":"The Authorino AuthConfig Custom Resource Definition (CRD)","text":"

    The desired protection for a service is declaratively stated by applying an AuthConfig Custom Resource to the Kubernetes cluster running Authorino.

    An AuthConfig resource typically looks like the following:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n    # The list of public host names of the services protected by this AuthConfig resource.\n    # Authorino uses the host name provided in the payload of external authorization request to lookup for the corresponding AuthConfig to enforce.\n  # Hostname collisions are prevented by rejecting to index a hostname already taken by another AuthConfig.\n  # Format: hostname[:port]\n  hosts:\n\n  - my-api.io:443 # north-south traffic\n  - my-api.ns.svc.cluster.local # east-west traffic\n\n  # Set of stored named patterns to be reused in conditions and pattern-matching authorization rules\n  patterns: {\"name\" \u2192 {selector, operator, value}, \u2026}\n\n  # Top-level conditions for the AuthConfig to be enforced.\n  # If omitted, the AuthConfig will be enforced at all requests.\n  # If present, all conditions must match for the AuthConfig to be enforced; otherwise, Authorino skips the AuthConfig and returns to the auth request with status OK.\n  when: [{selector, operator, value | named pattern ref}, \u2026]\n\n  # List of one or more trusted sources of identity:\n  # - Configurations to verify JSON Web Tokens (JWTs) issued by an OpenID Connect (OIDC) server\n  # - Endpoints for OAuth 2.0 token introspection\n  # - Attributes for the Kubernetes `TokenReview` API\n  # - Label selectors for API keys (stored in Kubernetes `Secret`s)\n  # - Label selectors trusted x509 issuer certificates (stored in Kubernetes `Secret`s)\n  # - Selectors for plain identity objects supplied in the payload of the authorization request\n  # - Anonymous access configs\n  authentication: {\"name\" \u2192 {\u2026}, \u2026}\n\n  # List of sources of external metadata for the authorization (optional):\n  # - Endpoints for HTTP GET or GET-by-POST requests\n  # - OIDC UserInfo endpoints (associated with an OIDC token issuer specified in the authentication configs)\n  # - User-Managed Access 
(UMA) resource registries\n  metadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n  # List of authorization policies to be enforced (optional):\n  # - Pattern-matching rules (e.g. `context.request.http.path eq '/pets'`)\n  # - Open Policy Agent (OPA) inline or external Rego policies\n  # - Attributes for the Kubernetes `SubjectAccessReview` API\n  # \u2013 Attributes for authorization with an external SpiceDB server\n  authorization: {\"name\" \u2192 {\u2026}, \u2026}\n\n  # Customization to the response to the external authorization request (optional)\n  response:\n    # List of dynamic response elements into the request on success authoization (optional):\n    # - Plain text\n    # - JSON objects\n    # - Festival Wristbands (signed JWTs issued by Authorino)\n    success:\n      # List of HTTP headers to inject into the request post-authorization (optional):\n      headers: {\"name\" \u2192 {\u2026}, \u2026}\n\n      # List of Envoy Dynamic Metadata to inject into the request post-authorization (optional):\n      dynamicMetadata: {\"name\" \u2192 {\u2026}, \u2026}\n\n    # Custom HTTP status code, message and headers to replace the default `401 Unauthorized` response (optional)\n    unauthenticated:\n      code: 302\n      message: Redirecting to login\n      headers:\n        \"Location\":\n          value: https://my-app.io/login\n\n    # Custom HTTP status code, message and headers to replace the default `and `403 Forbidden` response (optional)\n    unauthorized: {code, message, headers, body}\n\n  # List of callback targets:\n  # - Endpoints for HTTP requests\n  callbacks: {\"name\" \u2192 {\u2026}, \u2026}\n

    Check out the OAS of the AuthConfig CRD for a formal specification of the options for authentication verification, external metadata fetching, authorization policies, and dynamic response, as well as any other host protection capability implemented by Authorino.

    You can also read the specification from the CLI using the kubectl explain command. The Authorino CRD is required to have been installed in Kubernetes cluster. E.g. kubectl explain authconfigs.spec.authentication.overrides.

    A complete description of supported features and corresponding configuration options within an AuthConfig CR can be found in the Features page.

    More concrete examples of AuthConfigs for specific use-cases can be found in the User guides.

    "},{"location":"authorino/docs/architecture/#resource-reconciliation-and-status-update","title":"Resource reconciliation and status update","text":"

    The instances of the Authorino authorization service workload, following the Operator pattern, watch events related to the AuthConfig custom resources, to build and reconcile an in-memory index of configs. Whenever a replica receives traffic for authorization request, it looks up in the index of AuthConfigs and then triggers the \"Auth Pipeline\", i.e. enforces the associated auth spec onto the request.

    An instance can be a single authorization service workload or a set of replicas. All replicas watch and reconcile the same set of resources that match the --auth-config-label-selector and --secret-label-selector configuration options. (See both Cluster-wide vs. Namespaced instances and Sharding, for details about defining the reconciliation space of Authorino instances.)

    The above means that all replicas of an Authorino instance should be able to receive traffic for authorization requests.

    Among the multiple replicas of an instance, Authorino elects one replica to be leader. The leader is responsible for updating the status of reconciled AuthConfigs. If the leader eventually becomes unavailable, the instance will automatically elect another replica to take its place as the new leader.

    The status of an AuthConfig tells whether the resource is \"ready\" (i.e. indexed). It also includes summary information regarding the numbers of authentication configs, metadata configs, authorization configs and response configs within the spec, as well as whether Festival Wristband tokens are being issued by the Authorino instance as by spec.

    Apart from watching events related to AuthConfig custom resources, Authorino also watches events related to Kubernetes Secrets, as part of Authorino's API key authentication feature. Secret resources that store API keys are linked to their corresponding AuthConfigs in the index. Whenever the Authorino instance detects a change in the set of API key Secrets linked to an AuthConfigs, the instance reconciles the index.

    Authorino only watches events related to Secrets whose metadata.labels match the label selector --secret-label-selector of the Authorino instance. The default values of the label selector for Kubernetes Secrets representing Authorino API keys is authorino.kuadrant.io/managed-by=authorino.

    "},{"location":"authorino/docs/architecture/#the-auth-pipeline-aka-enforcing-protection-in-request-time","title":"The \"Auth Pipeline\" (aka: enforcing protection in request-time)","text":"

    In each request to the protected API, Authorino triggers the so-called \"Auth Pipeline\", a set of configured evaluators that are organized in a 5-phase pipeline:

    • (i) Authentication phase: at least one source of identity (i.e., one authentication config) must resolve the supplied credential in the request into a valid identity or Authorino will otherwise reject the request as unauthenticated (401 HTTP response status).
    • (ii) Metadata phase: optional fetching of additional data from external sources, to add up to context and identity information, and used in authorization policies, dynamic responses and callback requests (phases iii to v).
    • (iii) Authorization phase: all unskipped policies must evaluate to a positive result (\"authorized\"), or Authorino will otherwise reject the request as unauthorized (403 HTTP response code).
    • (iv) Response phase \u2013 Authorino builds all user-defined response items (dynamic JSON objects and/or Festival Wristband OIDC tokens), which are supplied back to the external authorization client within added HTTP headers or as Envoy Dynamic Metadata
    • (v) Callbacks phase \u2013 Authorino sends callbacks to specified HTTP endpoints.

    Each phase is sequential to the other, from (i) to (v), while the evaluators within each phase are triggered concurrently or as prioritized. The Authentication phase (i) is the only one required to list at least one evaluator (i.e. 1+ authentication configs); Metadata, Authorization and Response phases can have any number of evaluators (including zero, and even be omitted in this case).

    "},{"location":"authorino/docs/architecture/#host-lookup","title":"Host lookup","text":"

    Authorino reads the request host from Attributes.Http.Host of Envoy's CheckRequest type, and uses it as key to lookup in the index of AuthConfigs, matched against spec.hosts.

    Alternatively to Attributes.Http.Host, a host entry can be supplied in the Attributes.ContextExtensions map of the external authorization request. This will take precedence over the host attribute of the HTTP request.

    The host context extension is useful to support use cases such as of path prefix-based lookup and wildcard subdomains lookup with lookup strongly dictated by the external authorization client (e.g. Envoy), which often knows about routing and the expected AuthConfig to enforce beyond what Authorino can infer strictly based on the host name.

    Wildcards can also be used in the host names specified in the AuthConfig, resolved by Authorino. E.g. if *.pets.com is in spec.hosts, Authorino will match the concrete host names dogs.pets.com, cats.pets.com, etc. In case of multiple possible matches, Authorino will try the longest match first (in terms of host name labels) and fall back to the closest wildcard upwards in the domain tree (if any).

    When more than one host name is specified in the AuthConfig, all of them can be used as key, i.e. all of them can be requested in the authorization request and will be mapped to the same config.

    Example. Host lookup with wildcards.

    The domain tree above induces the following relation:

    • foo.nip.io \u2192 authconfig-1 (matches *.io)
    • talker-api.nip.io \u2192 authconfig-2 (matches talker-api.nip.io)
    • dogs.pets.com \u2192 authconfig-2 (matches *.pets.com)
    • api.acme.com \u2192 authconfig-3 (matches api.acme.com)
    • www.acme.com \u2192 authconfig-4 (matches *.acme.com)
    • foo.org \u2192 404 Not found

    The host can include the port number (i.e. hostname:port) or it can be just the name of the host name. Authorino will first try finding in the index a config associated to hostname:port, as supplied in the authorization request; if the index misses an entry for hostname:port, Authorino will then remove the :port suffix and repeat the lookup using just hostname as key. This provides implicit support for multiple port numbers for a same host without having to list all combinations in the AuthConfig.

    "},{"location":"authorino/docs/architecture/#avoiding-host-name-collision","title":"Avoiding host name collision","text":"

    Authorino tries to prevent host name collision between AuthConfigs by rejecting to link in the index any AuthConfig and host name if the host name is already linked to a different AuthConfig in the index. This was intentionally designed to prevent users from superseding each other's AuthConfigs, partially or fully, by just picking the same host names or overlapping host names as others.

    When wildcards are involved, a host name that matches a host wildcard already linked in the index to another AuthConfig will be considered taken, and therefore the newest AuthConfig will be rejected to be linked to that host.

    This behavior can be disabled to allow AuthConfigs to partially supersede each others' host names (limited to strict host subsets), by supplying the --allow-superseding-host-subsets command-line flag when running the Authorino instance.

    "},{"location":"authorino/docs/architecture/#the-authorization-json","title":"The Authorization JSON","text":"

    On every Auth Pipeline, Authorino builds the Authorization JSON, a \"working-memory\" data structure composed of context (information about the request, as supplied by the Envoy proxy to Authorino) and auth (objects resolved in phases (i) to (v) of the pipeline). The evaluators of each phase can read from the Authorization JSON and implement dynamic properties and decisions based on its values.

    At phase (iii), the authorization evaluators count on an Authorization JSON payload that looks like the following:

    // The authorization JSON combined along Authorino's auth pipeline for each request\n{\n  \"context\": { // the input from the proxy\n    \"origin\": {\u2026},\n    \"request\": {\n      \"http\": {\n        \"method\": \"\u2026\",\n        \"headers\": {\u2026},\n        \"path\": \"/\u2026\",\n        \"host\": \"\u2026\",\n        \u2026\n      }\n    }\n  },\n  \"auth\": {\n    \"identity\": {\n      // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n    },\n    \"metadata\": {\n      // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n    }\n  }\n}\n

    The policies evaluated can use any data from the authorization JSON to define authorization rules.

    After phase (iii), Authorino appends to the authorization JSON the results of this phase as well, and the payload available for phase (iv) becomes:

    // The authorization JSON combined along Authorino's auth pipeline for each request\n{\n  \"context\": { // the input from the proxy\n    \"origin\": {\u2026},\n    \"request\": {\n      \"http\": {\n        \"method\": \"\u2026\",\n        \"headers\": {\u2026},\n        \"path\": \"/\u2026\",\n        \"host\": \"\u2026\",\n        \u2026\n      }\n    }\n  },\n  \"auth\": {\n    \"identity\": {\n      // the identity resolved, from the supplied credentials, by one of the evaluators of phase (i)\n    },\n    \"metadata\": {\n      // each metadata object/collection resolved by the evaluators of phase (ii), by name of the evaluator\n    },\n    \"authorization\": {\n      // each authorization policy result resolved by the evaluators of phase (iii), by name of the evaluator\n    }\n  }\n}\n

    Festival Wristbands and Dynamic JSON responses can include dynamic values (custom claims/properties) fetched from the authorization JSON. These can be returned to the external authorization client in added HTTP headers or as Envoy Well Known Dynamic Metadata. Check out Custom response features for details.

    For information about reading and fetching data from the Authorization JSON (syntax, functions, etc), check out Common Expression Language (CEL).

    "},{"location":"authorino/docs/architecture/#raw-http-authorization-interface","title":"Raw HTTP Authorization interface","text":"

    Besides providing the gRPC authorization interface \u2013 that implements the Envoy gRPC authorization server \u2013, Authorino also provides another interface for raw HTTP authorization. This second interface responds to GET and POST HTTP requests sent to :5001/check, and is suitable for other forms of integration, such as:

    • using Authorino as Kubernetes ValidatingWebhook service (example);
    • other HTTP proxies and API gateways;
    • old versions of Envoy incompatible with the latest version of gRPC external authorization protocol (Authorino is based on v3.19.1 of Envoy external authorization API)

    In the raw HTTP interface, the host used to look up an AuthConfig must be supplied in the Host HTTP header of the request. Other attributes of the HTTP request are also passed in the context to evaluate the AuthConfig, including the body of the request.

    "},{"location":"authorino/docs/architecture/#caching","title":"Caching","text":""},{"location":"authorino/docs/architecture/#openid-connect-and-user-managed-access-configs","title":"OpenID Connect and User-Managed Access configs","text":"

    OpenID Connect and User-Managed Access configurations, discovered usually at reconciliation-time from well-known discovery endpoints.

    Cached individual OpenID Connect configurations discovered by Authorino can be configured to be auto-refreshed, by setting the corresponding spec.authentication.jwt.ttl field in the AuthConfig (given in seconds, default: 0 \u2013 i.e. no cache update).

    "},{"location":"authorino/docs/architecture/#json-web-keys-jwks-and-json-web-key-sets-jwks","title":"JSON Web Keys (JWKs) and JSON Web Key Sets (JWKS)","text":"

    JSON signature verification certificates linked by discovered OpenID Connect configurations, fetched usually at reconciliation-time.

    "},{"location":"authorino/docs/architecture/#revoked-access-tokens","title":"Revoked access tokens","text":"Not implemented - In analysis (#19)

    Caching of access tokens identified and or notified as revoked prior to expiration.

    "},{"location":"authorino/docs/architecture/#external-metadata","title":"External metadata","text":"Not implemented - Planned (#21)

    Caching of resource data obtained in previous requests.

    "},{"location":"authorino/docs/architecture/#compiled-rego-policies","title":"Compiled Rego policies","text":"

    Performed automatically by Authorino at reconciliation-time for the authorization policies based on the built-in OPA module.

    Precompiled and cached individual Rego policies originally pulled by Authorino from external registries can be configured to be auto-refreshed, by setting the corresponding spec.authorization.opa.externalRegistry.ttl field in the AuthConfig (given in seconds, default: 0 \u2013 i.e. no cache update).

    "},{"location":"authorino/docs/architecture/#repeated-requests","title":"Repeated requests","text":"Not implemented - In analysis (#20)

    For consecutive requests performed, within a given period of time, by the same user for the same resource, such that the result of the auth pipeline can be proven not to change.

    "},{"location":"authorino/docs/architecture/#sharding","title":"Sharding","text":"

    By default, Authorino instances will watch AuthConfig CRs in the entire space (namespace or entire cluster; see Cluster-wide vs. Namespaced instances for details). To support combining multiple Authorino instances and instance modes in the same Kubernetes cluster, and yet avoiding superposition between the instances (i.e. multiple instances reconciling the same AuthConfigs), Authorino offers support for data sharding, i.e. to horizontally narrow down the space of reconciliation of an Authorino instance to a subset of that space.

    The benefits of limiting the space of reconciliation of an Authorino instance include avoiding unnecessary caching and workload in instances that do not receive corresponding traffic (according to your routing settings) and preventing leaders of multiple instances (sets of replicas) from competing over resource status updates (see Resource reconciliation and status update for details).

    Use-cases for sharding of AuthConfigs:

    • Horizontal load balancing of traffic of authorization requests
    • Support for managed centralized instances of Authorino to API owners who create and maintain their own AuthConfigs within their own user namespaces.

    Authorino's custom controllers filter the AuthConfig-related events to be reconciled using Kubernetes label selectors, defined for the Authorino instance via --auth-config-label-selector command-line flag. By default, --auth-config-label-selector is empty, meaning all AuthConfigs in the space are watched; this variable can be set to any value parseable as a valid label selector, causing Authorino to then watch only events of AuthConfigs whose metadata.labels match the selector.

    The following are all valid examples of AuthConfig label selector filters:

    --auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by=authorino,other-label=other-value\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by in (authorino,kuadrant)\"\n--auth-config-label-selector=\"authorino.kuadrant.io/managed-by!=authorino-v0.4\"\n--auth-config-label-selector=\"!disabled\"\n
    "},{"location":"authorino/docs/architecture/#rbac","title":"RBAC","text":"

    The table below describes the roles and role bindings defined by the Authorino service:

    Role Kind Scope(*) Description Permissions authorino-manager-role ClusterRole C/N Role of the Authorino manager service Watch and reconcile AuthConfigs and Secrets authorino-manager-k8s-auth-role ClusterRole C/N Role for the Kubernetes auth features Create TokenReviews and SubjectAccessReviews (Kubernetes auth) authorino-leader-election-role Role N Leader election role Create/update the ConfigMap used to coordinate which replica of Authorino is the leader authorino-authconfig-editor-role ClusterRole - AuthConfig editor R/W AuthConfigs; Read AuthConfig/status authorino-authconfig-viewer-role ClusterRole - AuthConfig viewer Read AuthConfigs and AuthConfig/status authorino-proxy-role ClusterRole C/N Kube-rbac-proxy-role (sidecar)'s role Create TokenReviews and SubjectAccessReviews to check permissions to the /metrics endpoint authorino-metrics-reader ClusterRole - Metrics reader GET /metrics

    (*) C - Cluster-wide | N - Authorino namespace | C/N - Cluster-wide or Authorino namespace (depending on the deployment mode).

    "},{"location":"authorino/docs/architecture/#observability","title":"Observability","text":"

    Please refer to the Observability user guide for info on Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.

    "},{"location":"authorino/docs/contributing/","title":"Developer's Guide","text":""},{"location":"authorino/docs/contributing/#technology-stack-for-developers","title":"Technology stack for developers","text":"

    Minimum requirements to contribute to Authorino are:

    • Golang v1.21+
    • Docker

    Authorino's code was originally bundled using the Operator SDK (v1.9.0).

    The following tools can be installed as part of the development workflow:

    • Installed with go install to the $PROJECT_DIR/bin directory:

      • controller-gen: for building custom types and manifests
      • Kustomize: for assembling flavoured manifests and installing/deploying
      • setup-envtest: for running the tests \u2013 extra tools installed to ./testbin
      • benchstat: for human-friendly test benchmark reports
      • mockgen: to generate mocks for tests \u2013 e.g. ./bin/mockgen -source=pkg/auth/auth.go -destination=pkg/auth/mocks/mock_auth.go
      • Kind: for deploying a containerized Kubernetes cluster for integration testing purposes
    • Other recommended tools to have installed:

      • jq
      • yq
      • gnu-sed
    "},{"location":"authorino/docs/contributing/#workflow","title":"Workflow","text":""},{"location":"authorino/docs/contributing/#check-the-issues","title":"Check the issues","text":"

    Start by checking the list of issues in GitHub.

    In case you want to contribute with an idea for enhancement, a bug fix, or question, please make sure to describe the issue so we can start a conversation together and help you find the best way to get your contribution merged.

    "},{"location":"authorino/docs/contributing/#clone-the-repo-and-setup-the-local-environment","title":"Clone the repo and setup the local environment","text":"

    Fork/clone the repo:

    git clone git@github.com:kuadrant/authorino.git && cd authorino\n

    Download the Golang dependencies:

    make vendor\n

    For additional automation provided, check:

    make help\n
    "},{"location":"authorino/docs/contributing/#make-your-changes","title":"Make your changes","text":"

    Good changes...

    • follow the Golang conventions
    • have proper test coverage
    • address corresponding updates to the docs
    • help us fix wherever we failed to do the above \ud83d\ude1c
    "},{"location":"authorino/docs/contributing/#run-the-tests","title":"Run the tests","text":"

    To run the tests:

    make test\n
    "},{"location":"authorino/docs/contributing/#try-locally","title":"Try locally","text":""},{"location":"authorino/docs/contributing/#build-deploy-and-try-authorino-in-a-local-cluster","title":"Build, deploy and try Authorino in a local cluster","text":"

    The following command will:

    • Start a local Kubernetes cluster (using Kind)
    • Install cert-manager in the cluster
    • Install the Authorino Operator and Authorino CRDs
    • Build an image of Authorino based on the current branch
    • Push the freshly built image to the cluster's registry
    • Generate TLS certificates for the Authorino service
    • Deploy an instance of Authorino
    • Deploy the example application Talker API, a simple HTTP API that echoes back whatever it gets in the request
    • Setup Envoy for proxying to the Talker API and using Authorino for external authorization
    make local-setup\n

    You will be prompted to edit the Authorino custom resource.

    The main workload composed of Authorino instance and user apps (Envoy, Talker API) will be deployed to the default Kubernetes namespace.

    Once the deployment is ready, you can forward the requests on port 8000 to the Envoy service

    kubectl port-forward deployment/envoy 8000:8000 &\n
    Pro tips
    1. Change the default workload namespace by supplying the NAMESPACE argument to your make local-setup and other deployment, apps and local cluster related targets. If the namespace does not exist, it will be created.
    2. Switch to TLS disabled by default when deploying locally by supplying TLS_ENABLED=0 to your make local-setup and make deploy commands. E.g. make local-setup TLS_ENABLED=0.
    3. Skip being prompted to edit the Authorino CR and default to an Authorino deployment with TLS enabled, debug/development log level/mode, and standard name 'authorino', by supplying FF=1 to your make local-setup and make deploy commands. E.g. make local-setup FF=1
    4. Supply DEPLOY_IDPS=1 to make local-setup and make user-apps to deploy Keycloak and Dex to the cluster. DEPLOY_KEYCLOAK and DEPLOY_DEX are also available. Read more about additional tools for specific use cases in the section below.
    5. Saving the ID of the process (PID) of the port-forward command spawned in the background can be useful to later kill and restart the process. E.g. kubectl port-forward deployment/envoy 8000:8000 &;PID=$!; then kill $PID.
    "},{"location":"authorino/docs/contributing/#additional-tools-for-specific-use-cases","title":"Additional tools (for specific use-cases)","text":"Limitador

    To deploy Limitador \u2013 pre-configured in Envoy for rate-limiting the Talker API to 5 hits per minute per user_id when available in the cluster workload \u2013, run:

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
    Keycloak

    Authorino examples include a bundle of Keycloak preloaded with the following realm setup:

    • Admin console: http://localhost:8080/admin (admin/p)
    • Preloaded realm: kuadrant
    • Preloaded clients:
      • demo: to which API consumers delegate access and therefore the one which access tokens are issued to
      • authorino: used by Authorino to fetch additional user info with client_credentials grant type
      • talker-api: used by Authorino to fetch UMA-protected resource data associated with the Talker API
    • Preloaded resources:
      • /hello
      • /greetings/1 (owned by user john)
      • /greetings/2 (owned by user jane)
      • /goodbye
    • Realm roles:
      • member (default to all users)
      • admin
    • Preloaded users:
      • john/p (member)
      • jane/p (admin)
      • peter/p (member, email not verified)

    To deploy, run:

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

    Forward local requests to the instance of Keycloak running in the cluster:

    kubectl port-forward deployment/keycloak 8080:8080 &\n
    Dex

    Authorino examples include a bundle of Dex preloaded with the following setup:

    • Preloaded clients:
      • demo: to which API consumers delegate access and therefore the one which access tokens are issued to (Client secret: aaf88e0e-d41d-4325-a068-57c4b0d61d8e)
    • Preloaded users:
      • marta@localhost/password

    To deploy, run:

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/dex/dex-deploy.yaml\n

    Forward local requests to the instance of Dex running in the cluster:

    kubectl port-forward deployment/dex 5556:5556 &\n
    a12n-server

    Authorino examples include a bundle of a12n-server and corresponding MySQL database, preloaded with the following setup:

    • Admin console: http://a12n-server:8531 (admin/123456)
    • Preloaded clients:
      • service-account-1: to obtain access tokens via client_credentials OAuth2 grant type, to consume the Talker API (Client secret: DbgXROi3uhWYCxNUq_U1ZXjGfLHOIM8X3C2bJLpeEdE); includes metadata privilege: { \"talker-api\": [\"read\"] } that can be used to write authorization policies
      • talker-api: to authenticate to the token introspect endpoint (Client secret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g)

    To deploy, run:

    kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n

    Forward local requests to the instance of a12n-server running in the cluster:

    kubectl port-forward deployment/a12n-server 8531:8531 &\n
    "},{"location":"authorino/docs/contributing/#re-build-and-rollout-latest","title":"Re-build and rollout latest","text":"

    Re-build and rollout latest Authorino image:

    make local-rollout\n

    If you made changes to the CRD between iterations, re-install by running:

    make install\n
    "},{"location":"authorino/docs/contributing/#clean-up","title":"Clean-up","text":"

    The following command deletes the entire Kubernetes cluster started with Kind:

    make local-cleanup\n
    "},{"location":"authorino/docs/contributing/#sign-your-commits","title":"Sign your commits","text":"

    All commits to be accepted to Authorino's code are required to be signed. Refer to this page about signing your commits.

    "},{"location":"authorino/docs/contributing/#logging-policy","title":"Logging policy","text":"

    A few guidelines for adding logging messages in your code:

    1. Make sure you understand Authorino's Logging architecture and policy regarding log levels, log modes, tracing IDs, etc.
    2. Respect controller-runtime's Logging Guidelines.
    3. Do not add sensitive data to your info log messages; instead, redact all sensitive data in your log messages or use debug log level by mutating the logger with V(1) before outputting the message.
    "},{"location":"authorino/docs/contributing/#additional-resources","title":"Additional resources","text":"

    Here in the repo:

    • Getting started
    • Terminology
    • Architecture
    • Feature description

    Other repos:

    • Authorino Operator
    • Authorino examples
    "},{"location":"authorino/docs/contributing/#reach-out","title":"Reach out","text":"

    #kuadrant channel on kubernetes.slack.com.

    "},{"location":"authorino/docs/features/","title":"Features","text":""},{"location":"authorino/docs/features/#overview","title":"Overview","text":"

    We call features of Authorino the different things one can do to enforce identity verification & authentication and authorization on requests to protected services. These can be a specific identity verification method based on a supported authentication protocol, or a method to fetch additional auth metadata in request-time, etc.

    Most features of Authorino relate to the different phases of the Auth Pipeline and therefore are configured in the Authorino AuthConfig. An identity verification/authentication feature usually refers to a functionality of Authorino such as the API key-based authentication, the validation of JWTs/OIDC ID tokens, and authentication based on Kubernetes TokenReviews. Analogously, OPA, pattern-matching and Kubernetes SubjectAccessReview are examples of authorization features of Authorino.

    At a deeper level, a feature can also be an additional functionality within a bigger feature, usually applicable to the whole class the bigger feature belongs to. For instance, the configuration of how auth credentials are expected to be carried in the request, which is broadly available for any identity verification method. Other examples are: Identity extension and Priorities.

    A full specification of all features of Authorino that can be configured in an AuthConfig can be found in the official spec of the custom resource definition.

    You can also learn about Authorino features by using the kubectl explain command in a Kubernetes cluster where the Authorino CRD has been installed. E.g. kubectl explain authconfigs.spec.authentication.credentials.

    "},{"location":"authorino/docs/features/#common-feature-json-paths-selector","title":"Common feature: JSON paths (selector)","text":"

    Deprecated: Prefer predicate and expression, based on Common Expression Language (CEL), instead.

    The first feature of Authorino to learn about is a common functionality used in the specification of many other features. JSON paths are selectors of data from the Authorization JSON used in parts of an AuthConfig for referring to dynamic values of each authorization request.

    Usage examples of JSON paths are: dynamic URLs and request parameters when fetching metadata from external sources, dynamic authorization policy rules, and dynamic authorization response attributes (e.g. injected HTTP headers, Festival Wristband token claims, etc).

    "},{"location":"authorino/docs/features/#syntax","title":"Syntax","text":"

    The syntax to fetch data from the Authorization JSON with JSON paths is based on GJSON. Refer to GJSON Path Syntax page for more information.

    "},{"location":"authorino/docs/features/#string-modifiers","title":"String modifiers","text":"

    On top of GJSON, Authorino defines a few string modifiers.

    Examples below provided for the following Authorization JSON:

    {\n  \"context\": {\n    \"request\": {\n      \"http\": {\n        \"path\": \"/pets/123\",\n        \"headers\": {\n          \"authorization\": \"Basic amFuZTpzZWNyZXQK\" // jane:secret\n          \"baggage\": \"eyJrZXkxIjoidmFsdWUxIn0=\" // {\"key1\":\"value1\"}\n        }\n      }\n    }\n  },\n  \"auth\": {\n    \"identity\": {\n      \"username\": \"jane\",\n      \"fullname\": \"Jane Smith\",\n      \"email\": \"\\u0006jane\\u0012@petcorp.com\\n\"\n    },\n  },\n}\n

    @strip Strips out any non-printable characters such as carriage return. E.g. auth.identity.email.@strip \u2192 \"jane@petcorp.com\".

    @case:upper|lower Changes the case of a string. E.g. auth.identity.username.@case:upper \u2192 \"JANE\".

    @replace:{\"old\":string,\"new\":string} Replaces a substring within a string. E.g. auth.identity.username.@replace:{\"old\":\"Smith\",\"new\":\"Doe\"} \u2192 \"Jane Doe\".

    @extract:{\"sep\":string,\"pos\":int} Splits a string at occurrences of a separator (default: \" \") and selects the substring at the pos-th position (default: 0). E.g. context.request.path.@extract:{\"sep\":\"/\",\"pos\":2} \u2192 123.

    @base64:encode|decode base64-encodes or decodes a string value. E.g. auth.identity.username.decoded.@base64:encode \u2192 \"amFuZQo=\".

    In combination with @extract, @base64 can be used to extract the username in an HTTP Basic Authentication request. E.g. context.request.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\",\"pos\":1} \u2192 \"jane\".

    "},{"location":"authorino/docs/features/#interpolation","title":"Interpolation","text":"

    JSON paths can be interpolated into strings to build template-like dynamic values. E.g. \"Hello, {auth.identity.name}!\".

    "},{"location":"authorino/docs/features/#common-feature-common-expression-language-cel","title":"Common feature: Common Expression Language (CEL)","text":"

    Similar to JSON Paths, Authorino supports Common Expression Language (CEL) for selecting data from the Authorization JSON and representing predicates. This is a more powerful, properly typed alternative to JSON Paths, with a well-documented syntax.

    String extension functions, such as split, substring, indexOf, etc, are also supported.

    Use the expression field for selecting values from the Authorization JSON. The type of the selected value will be converted to a JSON-compatible equivalent. Complex types without a direct JSON equivalent may be converted to objects (e.g. google.golang.org/protobuf/types/known/timestamppb.Timestamp gets converted to { \"seconds\": Number, \"nanos\": Number })

    The most common applications of expression are for building dynamic URLs and request parameters when fetching metadata from external sources, extending properties of identity objects, and dynamic authorization response attributes (e.g. injected HTTP headers, etc).

    Use predicate for expressions that return a boolean value, such as in when conditions and pattern-matching authorization rules.

    "},{"location":"authorino/docs/features/#identity-verification-authentication-features-authentication","title":"Identity verification & authentication features (authentication)","text":""},{"location":"authorino/docs/features/#api-key-authenticationapikey","title":"API key (authentication.apiKey)","text":"

    Authorino relies on Kubernetes Secret resources to represent API keys.

    To define an API key, create a Secret in the cluster containing an api_key entry that holds the value of the API key.

    API key secrets must be created in the same namespace of the AuthConfig (default) or spec.authentication.apiKey.allNamespaces must be set to true (only works with cluster-wide Authorino instances).

    API key secrets must be labeled with the labels that match the selectors specified in spec.authentication.apiKey.selector in the AuthConfig.

    Whenever an AuthConfig is indexed, Authorino will also index all matching API key secrets. In order for Authorino to also watch events related to API key secrets individually (e.g. new Secret created, updates, deletion/revocation), Secrets must also include a label that matches Authorino's bootstrap configuration --secret-label-selector (default: authorino.kuadrant.io/managed-by=authorino). This label may or may not be present in spec.authentication.apiKey.selector in the AuthConfig without implications for the caching of the API keys when triggered by the reconciliation of the AuthConfig; however, if not present, individual changes related to the API key secret (i.e. without touching the AuthConfig) will be ignored by the reconciler.

    Example. For the following AuthConfig:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\n  namespace: authorino-system\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"api-key-users\":\n      apiKey:\n        selector:\n          matchLabels: # the key-value set used to select the matching `Secret`s; resources including these labels will be accepted as valid API keys to authenticate to this service\n            group: friends # some custom label\n        allNamespaces: true # only works with cluster-wide Authorino instances; otherwise, create the API key secrets in the same namespace of the AuthConfig\n

    The following Kubernetes Secret represents a valid API key:

    apiVersion: v1\nkind: Secret\nmetadata:\n  name: user-1-api-key-1\n  namespace: default\n  labels:\n    authorino.kuadrant.io/managed-by: authorino # so the Authorino controller reconciles events related to this secret\n    group: friends\nstringData:\n  api_key: <some-randomly-generated-api-key-value>\ntype: Opaque\n

    The resolved identity object, added to the authorization JSON following an API key identity source evaluation, is the Kubernetes Secret resource (as JSON).

    "},{"location":"authorino/docs/features/#kubernetes-tokenreview-authenticationkubernetestokenreview","title":"Kubernetes TokenReview (authentication.kubernetesTokenReview)","text":"

    Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).

    These tokens can be either ServiceAccount tokens such as the ones issued by kubelet as part of Kubernetes Service Account Token Volume Projection, or any valid user access tokens issued to users of the Kubernetes server API.

    The list of audiences of the token must include the requested host and port of the protected API (default), or all audiences specified in the Authorino AuthConfig custom resource. For example:

    For the following AuthConfig CR, the Kubernetes token must include the audience my-api.io:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"cluster-users\":\n      kubernetesTokenReview: {}\n

    Whereas for the following AuthConfig CR, the Kubernetes token audiences must include foo and bar:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"cluster-users\":\n      kubernetesTokenReview:\n        audiences:\n        - foo\n        - bar\n

    The resolved identity object added to the authorization JSON following a successful Kubernetes authentication identity evaluation is the status field of TokenReview response (see TokenReviewStatus for reference).

    "},{"location":"authorino/docs/features/#jwt-verification-authenticationjwt","title":"JWT verification (authentication.jwt)","text":"

    At reconciliation-time, using the OpenID Connect Discovery well-known endpoint, Authorino automatically discovers and caches OpenID Connect configurations and associated JSON Web Key Sets (JWKS) for all OpenID Connect issuers declared in an AuthConfig. Then, at request-time, Authorino verifies the JSON Web Signature (JWS) and checks the time validity of signed JSON Web Tokens (JWT) supplied on each request.

    Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.

    The kid claim stated in the JWT header must match one of the keys cached by Authorino during OpenID Connect Discovery, therefore supporting JWK rotation.

    The decoded payload of the validated JWT is appended to the authorization JSON as the resolved identity.

    OpenID Connect configurations and linked JSON Web Key Sets can be configured to be automatically refreshed (pull again from the OpenID Connect Discovery well-known endpoints), by setting the authentication.jwt.ttl field (given in seconds, default: 0 \u2013 i.e. auto-refresh disabled).

    For an excellent summary of the underlying concepts and standards that relate OpenID Connect and JSON Object Signing and Encryption (JOSE), see this article by Jan Rusnacko. For official specification and RFCs, see OpenID Connect Core, OpenID Connect Discovery, JSON Web Token (JWT) (RFC7519), and JSON Object Signing and Encryption (JOSE).

    "},{"location":"authorino/docs/features/#oauth-20-introspection-authenticationoauth2introspection","title":"OAuth 2.0 introspection (authentication.oauth2Introspection)","text":"

    For bare OAuth 2.0 implementations, Authorino can perform token introspection on the access tokens supplied in the requests to protected APIs.

    Authorino does not implement any of OAuth 2.0 grants for the applications to obtain the token. However, it can verify supplied tokens with the OAuth server, including opaque tokens, as long as the server exposes the token_introspect endpoint (RFC 7662).

    Developers must set the token introspection endpoint in the AuthConfig, as well as a reference to the Kubernetes secret storing the credentials of the OAuth client to be used by Authorino when requesting the introspect.

    The response returned by the OAuth2 server to the token introspection request is the resolved identity appended to the authorization JSON.

    "},{"location":"authorino/docs/features/#x509-client-certificate-authentication-authenticationx509","title":"X.509 client certificate authentication (authentication.x509)","text":"

    Authorino can verify X.509 certificates presented by clients for authentication on the request to the protected APIs, at application level.

    Trusted root Certificate Authorities (CA) are stored in Kubernetes Secrets labeled according to selectors specified in the AuthConfig, watched and indexed by Authorino. Make sure to create proper kubernetes.io/tls-typed Kubernetes Secrets, containing the public certificates of the CA stored in either a tls.crt or ca.crt entry inside the secret.

    Trusted root CA secrets must be created in the same namespace of the AuthConfig (default) or spec.authentication.x509.allNamespaces must be set to true (only works with cluster-wide Authorino instances).

    Client certificates must include x509 v3 extension specifying 'Client Authentication' extended key usage.

    The identity object resolved out of a client x509 certificate is equal to the subject field of the certificate, and it serializes as JSON within the Authorization JSON usually as follows:

    {\n    \"auth\": {\n        \"identity\": {\n            \"CommonName\": \"aisha\",\n            \"Country\": [\"PK\"],\n            \"ExtraNames\": null,\n            \"Locality\": [\"Islamabad\"],\n            \"Names\": [\n                { \"Type\": [2, 5, 4, 3], \"Value\": \"aisha\" },\n                { \"Type\": [2, 5, 4, 6], \"Value\": \"PK\" },\n                { \"Type\": [2, 5, 4, 7], \"Value\": \"Islamabad\" },\n                { \"Type\": [2, 5, 4,10], \"Value\": \"ACME Inc.\" },\n                { \"Type\": [2, 5, 4,11], \"Value\": \"Engineering\" }\n            ],\n            \"Organization\": [\"ACME Inc.\"],\n            \"OrganizationalUnit\": [\"Engineering\"],\n            \"PostalCode\": null,\n            \"Province\": null,\n            \"SerialNumber\": \"\",\n            \"StreetAddress\": null\n        }\n  }\n}\n
    "},{"location":"authorino/docs/features/#plain-authenticationplain","title":"Plain (authentication.plain)","text":"

    Authorino can read plain identity objects, based on authentication tokens provided and verified beforehand using other means (e.g. Envoy JWT Authentication filter, Kubernetes API server authentication), and injected into the payload to the external authorization service.

    The plain identity object is retrieved from the Authorization JSON. See Common Expression Language (CEL).

    This feature is particularly useful in cases where authentication/identity verification is handled before invoking the authorization service and its resolved value injected in the payload can be trusted. Examples of applications for this feature include:

    • Authentication handled in Envoy leveraging the Envoy JWT Authentication filter (decoded JWT injected as 'metadata_context')
    • Use of Authorino as Kubernetes ValidatingWebhook service (Kubernetes 'userInfo' injected in the body of the AdmissionReview request)

    Example of AuthConfig to retrieve plain identity object from the Authorization JSON.

    spec:\n  authentication:\n    \"pre-validated-jwt\":\n      plain:\n        expression: metadata.filter_metadata['envoy.filters.http.jwt_authn'].verified_jwt\n

    If the specified JSON path does not exist in the Authorization JSON or the value is null, the identity verification will fail and, unless other identity config succeeds, Authorino will halt the Auth Pipeline with the usual 401 Unauthorized.

    "},{"location":"authorino/docs/features/#anonymous-access-authenticationanonymous","title":"Anonymous access (authentication.anonymous)","text":"

    Literally a no-op evaluator for the identity verification phase that returns a static identity object {\"anonymous\":true}.

    It allows the implementation of AuthConfigs that bypass the identity verification phase of Authorino, for example to:

    • enable anonymous access to protected services (always or combined with Priorities)
    • postpone authentication in the Auth Pipeline to be resolved as part of an OPA policy

    Example of AuthConfig spec that falls back to anonymous access when OIDC authentication fails, enforcing read-only access to the protected service in such cases:

    spec:\n  authentication:\n    \"jwt\":\n      jwt:\n        issuerUrl: \"\u2026\"\n    \"anonymous\":\n      priority: 1 # expired oidc token, missing creds, etc. default to anonymous access\n      anonymous: {}\n  authorization:\n    \"read-only-access-if-authn-fails\":\n      when:\n\n      - predicate: has(auth.identity.anonymous) && auth.identity.anonymous\n      patternMatching:\n        patterns:\n        - predicate: request.method == 'GET'\n
    "},{"location":"authorino/docs/features/#festival-wristband-authentication","title":"Festival Wristband authentication","text":"

    Authorino-issued Festival Wristband tokens can be validated as any other signed JWT using Authorino's JWT verification.

    The value of the issuer must be the same issuer specified in the custom resource for the protected API originally issuing wristband. Eventually, this can be the same custom resource where the wristband is configured as a valid source of identity, but not necessarily.

    "},{"location":"authorino/docs/features/#extra-auth-credentials-authenticationcredentials","title":"Extra: Auth credentials (authentication.credentials)","text":"

    All the identity verification methods supported by Authorino can be configured regarding the location where access tokens and credentials (i.e. authentication secrets) travel within the request.

    By default, authentication secrets are expected to be supplied in the Authorization HTTP header, with the default Bearer prefix and the plain authentication secret separated by space.

    The full list of supported options is exemplified below:

    spec:\n  authentication:\n    \"creds-in-the-authz-header\":\n      credentials:\n        authorizationHeader:\n          prefix: JWT\n\n    \"creds-in-a-custom-header\":\n      credentials:\n        customHeader:\n          name: X-MY-CUSTOM-HEADER\n          prefix: \"\"\n\n    \"creds-in-a-query-param\":\n      credentials:\n        queryString:\n          name: my_param\n\n    \"creds-in-a-cookie-entry\":\n      credentials:\n        cookie:\n          name: cookie-key\n
    "},{"location":"authorino/docs/features/#extra-identity-extension-authenticationdefaults-and-authenticationoverrides","title":"Extra: Identity extension (authentication.defaults and authentication.overrides)","text":"

    Resolved identity objects can be extended with user-defined JSON properties. Values can be static or fetched from the Authorization JSON.

    A typical use-case for this feature is token normalization. Say you have more than one identity source listed in your AuthConfig but each source issues an access token with a different JSON structure \u2013 e.g. two OIDC issuers that use different names for custom JWT claims of similar meaning; when two different identity verification/authentication methods are combined, such as API keys (whose identity objects are the corresponding Kubernetes Secrets) and Kubernetes tokens (whose identity objects are Kubernetes UserInfo data).

    In such cases, identity extension can be used to normalize the token to always include the same set of JSON properties of interest, regardless of the source of identity that issued the original token verified by Authorino. This simplifies the writing of authorization policies and configuration of dynamic responses.

    In case of extending an existing property of the identity object (replacing), the API allows to control whether to overwrite the value or not. This is particularly useful for normalizing tokens of a same identity source that nonetheless may occasionally differ in structure, such as in the case of JWT claims that sometimes may not be present but can be safely replaced with another (e.g. username or sub).

    "},{"location":"authorino/docs/features/#external-auth-metadata-features-metadata","title":"External auth metadata features (metadata)","text":""},{"location":"authorino/docs/features/#http-getget-by-post-metadatahttp","title":"HTTP GET/GET-by-POST (metadata.http)","text":"

    Generic HTTP adapter that sends a request to an external service. It can be used to fetch external metadata for the authorization policies (phase ii of the Authorino Auth Pipeline), or as a web hook.

    The adapter allows issuing requests either by GET or POST methods; in both cases with URL and parameters defined by the user in the spec. Dynamic values fetched from the Authorization JSON can be used.

    POST request parameters as well as the encoding of the content can be controlled using the bodyParameters and contentType fields of the config, respectively. The Content-Type of POST requests can be either application/x-www-form-urlencoded (default) or application/json.

    Authentication of Authorino with the external metadata server can be set either via long-lived shared secret stored in a Kubernetes Secret or via OAuth2 client credentials grant. For long-lived shared secret, set the sharedSecretRef field. For OAuth2 client credentials grant, use the oauth2 option.

    In both cases, the location where the secret (long-lived or OAuth2 access token) travels in the request performed to the external HTTP service can be specified in the credentials field. By default, the authentication secret is supplied in the Authorization header with the Bearer prefix.

    Custom headers can be set with the headers field. Nevertheless, headers such as Content-Type and Authorization (or eventual custom header used for carrying the authentication secret, set instead via the credentials option) will be superseded by the respective values defined for the fields contentType and sharedSecretRef.

    "},{"location":"authorino/docs/features/#oidc-userinfo-metadatauserinfo","title":"OIDC UserInfo (metadata.userInfo)","text":"

    Online fetching of OpenID Connect (OIDC) UserInfo data (phase ii of the Authorino Auth Pipeline), associated with an OIDC identity source configured and resolved in phase (i).

    Apart from possibly complementing information of the JWT, fetching OpenID Connect UserInfo in request-time can be particularly useful for remote checking the state of the session, as opposed to only verifying the JWT/JWS offline.

    Implementation requires a JWT verification authentication config (spec.authentication.jwt) in the same AuthConfig, so the well-known configuration of the OpenId Connect (OIDC) issuer can be reused.

    The response returned by the OIDC server to the UserInfo request is appended (as JSON) to auth.metadata in the authorization JSON.

    "},{"location":"authorino/docs/features/#user-managed-access-uma-resource-registry-metadatauma","title":"User-Managed Access (UMA) resource registry (metadata.uma)","text":"

    User-Managed Access (UMA) is an OAuth-based protocol for resource owners to allow other users to access their resources. Since the UMA-compliant server is expected to know about the resources, Authorino includes a client that fetches resource data from the server and adds that as metadata of the authorization payload.

    This enables the implementation of resource-level Attribute-Based Access Control (ABAC) policies. Attributes of the resource fetched in a UMA flow can be, e.g., the owner of the resource, or any business-level attributes stored in the UMA-compliant server.

    A UMA-compliant server is an external authorization server (e.g., Keycloak) where the protected resources are registered. It can be as well the upstream API itself, as long as it implements the UMA protocol, with initial authentication by client_credentials grant to exchange for a Protected API Token (PAT).

    It's important to notice that Authorino does NOT manage resources in the UMA-compliant server. As shown in the flow above, Authorino's UMA client is only to fetch data about the requested resources. Authorino exchanges client credentials for a Protected API Token (PAT), then queries for resources whose URI match the path of the HTTP request (as passed to Authorino by the Envoy proxy) and fetches data of each matching resource.

    The resources data is added as metadata of the authorization payload and passed as input for the configured authorization policies. All resources returned by the UMA-compliant server in the query by URI are passed along. They are available in the PDPs (authorization payload) as input.auth.metadata.custom-name => Array. (See The \"Auth Pipeline\" for details.)

    "},{"location":"authorino/docs/features/#authorization-features-authorization","title":"Authorization features (authorization)","text":""},{"location":"authorino/docs/features/#pattern-matching-authorization-authorizationpatternmatching","title":"Pattern-matching authorization (authorization.patternMatching)","text":"

    Grant/deny access based on simple pattern-matching expressions (\"patterns\") compared against values selected from the Authorization JSON.

    Each expression is composed of exactly one of the following options:

    1. a predicate field - Common Expression Language (CEL) expression that evaluates to a boolean value;
    2. a tuple composed of:
    3. selector: a JSON path to fetch a value from the Authorization JSON
    4. operator: one of: eq (equals), neq (not equal); incl (includes) and excl (excludes), for arrays; and matches, for regular expressions
    5. value: a static string value to compare the value selected from the Authorization JSON with;
    6. a patternRef field \u2013 value that maps to a predefined set of { selector, operator, value } tuples stored at the top-level of the AuthConfig spec (patterns).

    Rules can mix and combine literal expressions and references to expression sets (\"named patterns\") defined at the upper level of the AuthConfig spec. (See Common feature: Conditions)

    spec:\n  authorization:\n    \"my-simple-json-pattern-matching-policy\":\n      patternMatching:\n        patterns: # All patterns must match for access to be granted\n\n        - predicate: auth.identity.email_verified\n        - patternRef: admin\n\n  patterns:\n    admin: # a named pattern that can be reused in other sets of rules or conditions\n\n    - selector: auth.identity.roles\n      operator: incl\n      value: admin\n
    "},{"location":"authorino/docs/features/#open-policy-agent-opa-rego-policies-authorizationopa","title":"Open Policy Agent (OPA) Rego policies (authorization.opa)","text":"

    You can model authorization policies in Rego language and add them as part of the protection of your APIs.

    Policies can be either declared in-line in Rego language (rego) or as an HTTP endpoint where Authorino will fetch the source code of the policy in reconciliation-time (externalPolicy).

    Policies pulled from external registries can be configured to be automatically refreshed (pulled again from the external registry), by setting the authorization.opa.externalPolicy.ttl field (given in seconds, default: 0 \u2013 i.e. auto-refresh disabled).

    Authorino's built-in OPA module precompiles the policies during reconciliation of the AuthConfig and caches the precompiled policies for fast evaluation in runtime, where they receive the Authorization JSON as input.

    An optional field allValues: boolean makes the values of all rules declared in the Rego document to be returned in the OPA output after policy evaluation. When disabled (default), only the boolean value allow is returned. Values of internal rules of the Rego document can be referenced in subsequent policies/phases of the Auth Pipeline.

    "},{"location":"authorino/docs/features/#kubernetes-subjectaccessreview-authorizationkubernetessubjectaccessreview","title":"Kubernetes SubjectAccessReview (authorization.kubernetesSubjectAccessReview)","text":"

    Access control enforcement based on rules defined in the Kubernetes authorization system, i.e. Role, ClusterRole, RoleBinding and ClusterRoleBinding resources of Kubernetes RBAC.

    Authorino issues a SubjectAccessReview (SAR) inquiry that checks with the underlying Kubernetes server whether the user can access a particular resource, resource kind or generic URL.

    It supports resource attributes authorization check (parameters defined in the AuthConfig) and non-resource attributes authorization check (HTTP endpoint inferred from the original request).

    • Resource attributes: adequate for permissions set at namespace level, defined in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb)
    • Non-resource attributes: adequate for permissions set at cluster scope, defined for protected endpoints of a generic HTTP API (URL path + verb)

    Example of Kubernetes role for resource attributes authorization:

    apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: pet-reader\nrules:\n\n- apiGroups: [\"pets.io\"]\n  resources: [\"pets\"]\n  verbs: [\"get\"]\n

    Example of Kubernetes cluster role for non-resource attributes authorization:

    apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: pet-editor\nrules:\n\n- nonResourceURLs: [\"/pets/*\"]\n  verbs: [\"put\", \"delete\"]\n

    Kubernetes' authorization policy configs look like the following in an Authorino AuthConfig:

    authorization:\n  \"kubernetes-rbac\":\n    kubernetesSubjectAccessReview:\n      user: # values of the parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n        expression: auth.identity.metadata.annotations.userid\n\n      groups: [] # user groups to test for.\n\n      # for resource attributes permission checks; omit it to perform a non-resource attributes SubjectAccessReview with path and method/verb assumed from the original request\n      # if included, use the resource attributes, where the values for each parameter can be fixed (`value`) or fetched from the Authorization JSON (`selector`)\n      resourceAttributes:\n        namespace:\n          value: default\n        group:\n          value: pets.io # the api group of the protected resource to be checked for permissions for the user\n        resource:\n          value: pets # the resource kind\n        name:\n          expression: request.path.split('/')[2] # resource name \u2013 e.g., the {id} in `/pets/{id}`\n        verb:\n          expression: request.method.lowerAscii() # api operation \u2013 e.g., copying from the context to use the same http method of the request\n

    user and properties of resourceAttributes can be defined from fixed values or patterns of the Authorization JSON.

    An array of groups (optional) can as well be set. When defined, it will be used in the SubjectAccessReview request.

    "},{"location":"authorino/docs/features/#spicedb-authorizationspicedb","title":"SpiceDB (authorization.spicedb)","text":"

    Check permission requests via gRPC with an external Google Zanzibar-inspired SpiceDB server, by Authzed.

    Subject, resource and permission parameters can be set to static values or read from the Authorization JSON.

    spec:\n  authorization:\n    \"spicedb\":\n      spicedb:\n        endpoint: spicedb:50051\n        insecure: true # disables TLS\n        sharedSecretRef:\n          name: spicedb\n          key: token\n        subject:\n          kind:\n            value: blog/user\n          name:\n            expression: auth.identity.sub\n        resource:\n          kind:\n            value: blog/post\n          name:\n            expression: request.path.split('/')[2] # /posts/{id}\n        permission:\n          expression: request.method\n
    "},{"location":"authorino/docs/features/#custom-response-features-response","title":"Custom response features (response)","text":""},{"location":"authorino/docs/features/#custom-response-forms-successful-authorization-vs-custom-denial-status","title":"Custom response forms: successful authorization vs custom denial status","text":"

    The response to the external authorization request can be customized in the following fashion:

    • Successful authorization (response.success)
    • Added HTTP headers (response.success.headers)
    • Envoy Dynamic Metadata (response.success.dynamicMetadata)
    • Custom denial status
    • Unauthenticated (response.unauthenticated)
    • Unauthorized (response.unauthorized)

    Successful authorization custom responses can be set based on any of the supported custom authorization methods:

    • Plain text value
    • JSON injection
    • Festival Wristband Tokens
    "},{"location":"authorino/docs/features/#added-http-headers","title":"Added HTTP headers","text":"

    Set custom responses as HTTP headers injected in the request post-successful authorization by specifying one of the supported methods under response.success.headers.

    The name of the response config (default) or the value of the key option (if provided) will be used as the name of the header.

    "},{"location":"authorino/docs/features/#envoy-dynamic-metadata","title":"Envoy Dynamic Metadata","text":"

    Authorino custom response methods can also be used to propagate Envoy Dynamic Metadata. To do so, set one of the supported methods under response.success.dynamicMetadata.

    The name of the response config (default) or the value of the key option (if provided) will be used as the name of the root property of the dynamic metadata content.

    A custom response exported as Envoy Dynamic Metadata can be set in the Envoy route or virtual host configuration as input to a consecutive filter in the filter chain.

    E.g., to read metadata emitted by the authorization service with scheme { \"auth-data\": { \"api-key-ns\": string, \"api-key-name\": string } }, as input in a rate limit configuration placed in the filter chain after the external authorization, the Envoy config may look like the following:

    # Envoy config snippet to inject `user_namespace` and `username` rate limit descriptors from metadata emitted by Authorino\nrate_limits:\n\n- actions:\n  - metadata:\n      metadata_key:\n        key: \"envoy.filters.http.ext_authz\"\n        path:\n        - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n        - key: api-key-ns\n      descriptor_key: user_namespace\n  - metadata:\n      metadata_key:\n        key: \"envoy.filters.http.ext_authz\"\n        path:\n        - key: auth-data # root of the dynamic metadata object, as declared in a custom response config of the AuthConfig (name or key)\n        - key: api-key-name\n      descriptor_key: username\n
    "},{"location":"authorino/docs/features/#custom-denial-status-responseunauthenticated-and-responseunauthorized","title":"Custom denial status (response.unauthenticated and response.unauthorized)","text":"

    By default, Authorino will inform Envoy to respond with 401 Unauthorized or 403 Forbidden respectively when the identity verification (phase i of the Auth Pipeline) or authorization (phase ii) fail. These can be customized respectively by specifying spec.response.unauthenticated and spec.response.unauthorized in the AuthConfig.

    "},{"location":"authorino/docs/features/#custom-response-methods","title":"Custom response methods","text":""},{"location":"authorino/docs/features/#plain-text-responsesuccessheadersdynamicmetadataplain","title":"Plain text (response.success.<headers|dynamicMetadata>.plain)","text":"

    Simpler, yet more generalized form, for extending the authorization response for header mutation and Envoy Dynamic Metadata, based on plain text values.

    The value can be static:

    response:\n  success:\n    headers:\n      \"x-auth-service\"\n        plain:\n          value: Authorino\n

    or fetched dynamically from the Authorization JSON (which includes support for interpolation):

    response:\n  success:\n    headers:\n      \"x-username\":\n        plain:\n          expression: auth.identity.username\n
    "},{"location":"authorino/docs/features/#json-injection-responsesuccessheadersdynamicmetadatajson","title":"JSON injection (response.success.<headers|dynamicMetadata>.json)","text":"

    User-defined dynamic JSON objects generated by Authorino in the response phase, from static or dynamic data of the auth pipeline, and passed back to the external authorization client within added HTTP headers or Dynamic Metadata.

    The following Authorino AuthConfig custom resource is an example that defines 3 dynamic JSON response items, where two items are returned to the client, stringified, in added HTTP headers, and the third as Envoy Dynamic Metadata. Envoy proxy can be configured to propagate the dynamic metadata emitted by Authorino into another filter \u2013 e.g. the rate limit filter.

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  namespace: my-namespace\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"edge\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino.kuadrant.io/managed-by: authorino\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      headers:\n        \"x-my-custom-header\":\n          json:\n            properties:\n              \"prop1\":\n                value: value1\n              \"prop2\":\n                expression: some.path.within.auth.json\n        \"x-ext-auth-other-json\":\n          json:\n            properties:\n              \"propX\":\n                value: valueX\n\n      dynamicMetadata:\n        \"auth-data\":\n          json:\n            properties:\n              \"api-key-ns\":\n                expression: auth.identity.metadata.namespace\n              \"api-key-name\":\n                expression: auth.identity.metadata.name\n
    "},{"location":"authorino/docs/features/#festival-wristband-tokens-responsesuccessheadersdynamicmetadatawristband","title":"Festival Wristband tokens (response.success.<headers|dynamicMetadata>.wristband)","text":"

    Festival Wristbands are signed OpenID Connect JSON Web Tokens (JWTs) issued by Authorino at the end of the auth pipeline and passed back to the client, typically in added HTTP response header. It is an opt-in feature that can be used to implement Edge Authentication Architecture (EAA) and enable token normalization. Authorino wristbands include minimal standard JWT claims such as iss, iat, and exp, and optional user-defined custom claims, whose values can be static or dynamically fetched from the authorization JSON.

    The Authorino AuthConfig custom resource below sets an API protection that issues a wristband after a successful authentication via API key. Apart from standard JWT claims, the wristband contains 2 custom claims: a static value aud=internal and a dynamic value born that fetches from the authorization JSON the date/time of creation of the secret that represents the API key used to authenticate.

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  namespace: my-namespace\n  name: my-api-protection\nspec:\n  hosts:\n\n  - my-api.io\n  authentication:\n    \"edge\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino.kuadrant.io/managed-by: authorino\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      headers:\n        \"x-wristband\":\n          wristband:\n            issuer: https://authorino-oidc.default.svc:8083/my-namespace/my-api-protection/x-wristband\n            customClaims:\n              \"aud\":\n                value: internal\n              \"age\":\n                expression: int(request.time.seconds) - (timestamp(auth.identity.metadata.creationTimestamp) - timestamp(\"1970-01-01T00:00:00Z\")).getSeconds()\n            tokenDuration: 300\n            signingKeyRefs:\n            - name: my-signing-key\n              algorithm: ES256\n            - name: my-old-signing-key\n              algorithm: RS256\n

    The signing key names listed in signingKeyRefs must match the names of Kubernetes Secret resources created in the same namespace, where each secret contains a key.pem entry that holds the value of the private key that will be used to sign the wristbands issued, formatted as PEM. The first key in this list will be used to sign the wristbands, while the others are kept to support key rotation.

    For each protected API configured for the Festival Wristband issuing, Authorino exposes the following OpenID Connect Discovery well-known endpoints (available for requests within the cluster):

    • OpenID Connect configuration: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-configuration
    • JSON Web Key Set (JWKS) well-known endpoint: https://authorino-oidc.default.svc:8083/{namespace}/{api-protection-name}/{response-config-name}/.well-known/openid-connect/certs
    "},{"location":"authorino/docs/features/#callbacks-callbacks","title":"Callbacks (callbacks)","text":""},{"location":"authorino/docs/features/#http-endpoints-callbackshttp","title":"HTTP endpoints (callbacks.http)","text":"

    Sends requests to specified HTTP endpoints at the end of the auth pipeline.

    The scheme of the http field is the same as of metadata.http.

    Example:

    spec:\n  authentication: [\u2026]\n  authorization: [\u2026]\n\n  callbacks:\n    \"log\":\n      http:\n        url: http://logsys\n        method: POST\n        body:\n          expression: |\n            { \"requestId\": request.id, \"username\": auth.identity.username, \"authorizationResult\": auth.authorization }\n    \"important-forbidden\":\n      when:\n\n      - predicate: \"!auth.authorization.important-policy\"\n      http:\n        urlExpression: |\n          \"http://monitoring/important?forbidden-user=\" + auth.identity.username\n
    "},{"location":"authorino/docs/features/#common-feature-priorities","title":"Common feature: Priorities","text":"

    Priorities allow to set sequence of execution for blocks of concurrent evaluators within phases of the Auth Pipeline.

    Evaluators of same priority execute concurrently to each other \"in a block\". After syncing that block (i.e. after all evaluators of the block have returned), the next block of evaluator configs of consecutive priority is triggered.

    Use cases for priorities are:

    1. Saving expensive tasks to be triggered when there's a high chance of returning immediately after finishing executing a less expensive one \u2013 e.g.
      • an identity config that calls an external IdP to verify a token that is rarely used, compared to verifying JWTs preferred by most users of the service;
      • an authorization policy that performs some quick checks first, such as verifying allowed paths, and only if it passes, moves to the evaluation of a more expensive policy.
    2. Establishing dependencies between evaluators - e.g.
      • an external metadata request that needs to wait until a previous metadata responds first (in order to use data from the response)

    Priorities can be set using the priority property available in all evaluator configs of all phases of the Auth Pipeline (identity, metadata, authorization and response). The lower the number, the higher the priority. By default, all evaluators have priority 0 (i.e. highest priority).

    Consider the following example to understand how priorities work:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api\n  authentication:\n    \"tier-1\":\n      priority: 0\n      apiKey:\n        selector:\n          matchLabels:\n            tier: \"1\"\n    \"tier-2\":\n      priority: 1\n      apiKey:\n        selector:\n          matchLabels:\n            tier: \"2\"\n    \"tier-3\":\n      priority: 1\n      apiKey:\n        selector:\n          matchLabels:\n            tier: \"3\"\n  metadata:\n    \"first\":\n      http:\n        url: http://talker-api:3000\n    \"second\":\n      priority: 1\n      http:\n        url: http://talker-api:3000/first_uuid={auth.metadata.first.uuid}\n  authorization:\n    \"allowed-endpoints\":\n      when:\n      - predicate: |\n          !(request.path in ['/hi', '/hello', '/aloha', '/ciao'])\n      patternMatching:\n        patterns:\n        - pattern: \"true\"\n    \"more-expensive-policy\": # no point in evaluating this one if it's not an allowed endpoint\n      priority: 1\n      opa:\n        rego: |\n          allow { true }\n  response:\n    success:\n      headers:\n        \"x-auth-data\":\n          json:\n            properties:\n              \"tier\":\n                expression: auth.identity.metadata.labels.tier\n              \"first-uuid\":\n                expression: auth.metadata.first.uuid\n              \"second-uuid\":\n                expression: auth.metadata.second.uuid\n              \"second-path\":\n                expression: auth.metadata.second.path\n

    For the AuthConfig above,

    • Identity configs tier-2 and tier-3 (priority 1) will only trigger (concurrently) in case tier-1 (priority 0) fails to validate the authentication token first. (This behavior happens without prejudice of context canceling between concurrent evaluators \u2013 i.e. evaluators that are triggered concurrently to another, such as tier-2 and tier-3, continue to cancel the context of each other if any of them succeeds validating the token first.)

    • Metadata source second (priority 1) uses the response of the request issued by metadata source first (priority 0), so it will wait for first to finish by triggering only in the second block.

    • Authorization policy allowed-endpoints (priority 0) is considered to be a lot less expensive than more-expensive-policy (priority 1) and has a high chance of denying access to the protected service (if the path is not one of the allowed endpoints). By setting different priorities to these policies we ensure the more expensive policy is triggered in sequence after the less expensive one, instead of concurrently.

    "},{"location":"authorino/docs/features/#common-feature-conditions-when","title":"Common feature: Conditions (when)","text":"

    Conditions, identified by the when field in the AuthConfig API, are logical expressions (\"predicates\") that can be used to condition the evaluation of a particular auth rule, as well as of the AuthConfig altogether (\"top-level conditions\").

    The predicates are evaluated against the Authorization JSON, where each predicate is composed of exactly one of the following options:

    1. a predicate field \u2013 CEL expression that evaluates to a boolean value;
    2. a tuple composed of:
    3. selector: a JSON path to fetch a value from the Authorization JSON
    4. operator: one of: eq (equals); neq (not equal); incl (includes) and excl (excludes), for when the value fetched from the Authorization JSON is expected to be an array; matches, for regular expressions
    5. value: a static string value to compare the value selected from the Authorization JSON with;
    6. a patternRef field \u2013 value that maps to a predefined set of { selector, operator, value } tuples stored at the top-level of the AuthConfig spec (patterns).

    An expression contains one or more patterns and they must either all evaluate to true (\"AND\" operator, declared by grouping the patterns within an all block) or at least one of the patterns must be true (\"OR\" operator, when grouped within an any block.) Patterns not explicitly grouped are AND'ed by default.

    Examples of when conditions

    i) to skip an entire AuthConfig based on the context (AND operator assumed by default):

    spec:\n  when: # auth enforced only on requests to POST /resources/*\n\n  - predicate: request.method == 'POST' && request.path.matches(\"^/resources/.*\")\n

    ii) equivalent to the above using { selector, operator, value } tuples and an explicit AND operator (all):

    spec:\n  when: # auth enforced only on requests to POST /resources/*\n\n  - all:\n    - selector: request.method\n      operator: eq\n      value: POST\n    - selector: request.path\n      operator: matches\n      value: ^/resources/.*\n

    iii) OR condition (any) using { selector, operator, value } tuples:

    spec:\n  when: # auth enforced only on requests with HTTP method equals to POST or PUT\n\n  - any:\n    - selector: request.method\n      operator: eq\n      value: POST\n    - selector: request.method\n      operator: eq\n      value: PUT\n

    iv) complex expression with nested operations using { selector, operator, value } tuples:

    spec:\n  when: # auth enforced only on requests to POST /resources/* or PUT /resources/*\n\n  - any:\n    - all:\n      - selector: request.method\n        operator: eq\n        value: POST\n      - selector: request.path\n        operator: matches\n        value: ^/resources/.*\n    - all:\n      - selector: request.method\n        operator: eq\n        value: PUT\n      - selector: request.path\n        operator: matches\n        value: ^/resources/.*\n

    v) more concise equivalent of the above using CEL:

    spec:\n  when: # auth enforced only on requests to /resources/* path with method equals to POST or PUT\n\n  - predicate: request.path.matches(\"^/resources/.*\") && request.method in ['POST', 'PUT']\n

    vi) to skip part of an AuthConfig (i.e., a specific auth rule):

    spec:\n  metadata:\n    \"metadata-source\":\n      http:\n        url: https://my-metadata-source.io\n      when: # only fetch the external metadata if the context is HTTP method other than OPTIONS\n\n      - predicate: request.method != 'OPTIONS'\n

    vii) skipping part of an AuthConfig will not affect other auth rules:

    spec:\n  authentication:\n    \"authn-meth-1\":\n      apiKey: {\u2026} # this auth rule only triggers for POST requests to /foo[/*]\n      when:\n\n      - predicate: request.method == 'POST' && request.path.matches(\"^/foo(/.*)?$\")\n\n    \"authn-meth-2\": # this auth rule triggered regardless\n      jwt: {\u2026}\n

    viii) concrete use-case: evaluating only the necessary identity checks based on the user's indication of the preferred authentication method (prefix of the value supplied in the HTTP Authorization request header):

    spec:\n  authentication:\n    \"jwt\":\n      when:\n\n      - predicate: request.headers['authorization'].startsWith('JWT')\n      jwt: {\u2026}\n\n    \"api-key\":\n      when:\n\n      - predicate: request.headers['authorization'].startsWith('APIKEY')\n      apiKey: {\u2026}\n

    ix) to avoid repetition while defining patterns for conditions:

    spec:\n  patterns:\n    a-pet: # a named pattern that can be reused in sets of conditions\n\n    - selector: context.request.http.path\n      operator: matches\n      value: ^/pets/\\d+(/.*)$\n\n  metadata:\n    \"pets-info\":\n      when:\n\n      - patternRef: a-pet\n      http:\n        urlExpression: |\n          \"https://pets-info.io?petId=\" + request.path.split('/')[2]\n\n  authorization:\n    \"pets-owners-only\":\n      when:\n\n      - patternRef: a-pet\n      opa:\n        rego: |\n          allow { input.metadata[\"pets-info\"].ownerid == input.auth.identity.userid }\n

    x) combining literals and refs \u2013 concrete case: authentication required for selected operations:

    spec:\n  patterns:\n    api-base-path:\n\n    - selector: request.path\n      operator: matches\n      value: ^/api/.*\n\n    authenticated-user:\n\n    - selector: auth.identity.anonymous\n      operator: neq\n      value: \"true\"\n\n  authentication:\n    api-users: # tries to authenticate all requests to path /api/*\n      when:\n\n      - patternRef: api-base-path\n      jwt: {\u2026}\n\n    others: # defaults to anonymous access when authentication fails or not /api/* path\n      anonymous: {}\n      priority: 1\n\n  authorization:\n    api-write-access-requires-authentication: # POST/PUT/DELETE requests to /api/* path cannot be anonymous\n      when:\n\n      - patternRef: api-base-path\n      - predicate: request.method in ['POST', 'PUT', 'DELETE']\n      opa:\n        patternMatching:\n          rules:\n          - patternRef: authenticated-user\n\n  response: # bonus: export user data if available\n    success:\n      dynamicMetadata:\n        \"user-data\":\n          when:\n\n          - patternRef: authenticated-user\n          json:\n            properties:\n              jwt-claims:\n                expression: auth.identity\n
    "},{"location":"authorino/docs/features/#common-feature-caching-cache","title":"Common feature: Caching (cache)","text":"

    Objects resolved at runtime in an Auth Pipeline can be cached \"in-memory\", avoiding being evaluated again at a subsequent request, until the cache entry expires. A lookup cache key and a TTL can be set individually for any evaluator config in an AuthConfig.

    Each cache config induces a completely independent cache table (or \"cache namespace\"). Consequently, different evaluator configs can use the same cache key and there will be no collision between entries from different evaluators.

    E.g.:

    spec:\n  hosts:\n\n  - my-api.io\n\n  authentication: [\u2026]\n\n  metadata:\n    \"external-metadata\":\n      http:\n        urlExpression: |\n          \"http://my-external-source?search=\" + request.path\n      cache:\n        key:\n          expression: request.path\n        ttl: 300\n\n  authorization:\n    \"complex-policy\":\n      opa:\n        externalPolicy:\n          url: http://my-policy-registry\n      cache:\n        key:\n          expression: auth.identity.group + '-' + request.method + '-' + request.path\n        ttl: 60\n

    The example above sets caching for the 'external-metadata' metadata config and for the 'complex-policy' authorization policy. In the case of 'external-metadata', the cache key is the path of the original HTTP request being authorized by Authorino (fetched dynamically from the Authorization JSON); i.e., after obtaining a metadata object from the external source for a given contextual HTTP path one first time, whenever that same HTTP path repeats in a subsequent request, Authorino will use the cached object instead of sending a request again to the external source of metadata. After 5 minutes (300 seconds), the cache entry will expire and Authorino will fetch again from the source if requested.

    As for the 'complex-policy' authorization policy, the cache key is a string composed of the 'group' the identity belongs to, the method of the HTTP request and the path of the HTTP request. Whenever these repeat, Authorino will use the result of the policy that was previously evaluated and cached. Cache entries in this namespace expire after 60 seconds.

    Notes on evaluator caching

    Capacity - By default, each cache namespace is limited to 1 mb. Entries will be evicted following First-In-First-Out (FIFO) policy to release space. The individual capacity of cache namespaces is set at the level of the Authorino instance (via --evaluator-cache-size command-line flag or spec.evaluatorCacheSize field of the Authorino CR).

    Usage - Avoid caching objects whose evaluation is considered to be relatively cheap. Examples of operations associated to Authorino auth features that are usually NOT worth caching: validation of JSON Web Tokens (JWT), Kubernetes TokenReviews and SubjectAccessReviews, API key validation, simple JSON pattern-matching authorization rules, simple OPA policies. Examples of operations where caching may be desired: OAuth2 token introspection, fetching of metadata from external sources (via HTTP request), complex OPA policies.

    "},{"location":"authorino/docs/features/#common-feature-metrics-metrics","title":"Common feature: Metrics (metrics)","text":"

    By default, Authorino will only export metrics down to the level of the AuthConfig. Deeper metrics at the level of each evaluator within an AuthConfig can be activated by setting the common field metrics: true of the evaluator config.

    E.g.:

    apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-authconfig\n  namespace: my-ns\nspec:\n  metadata:\n    \"my-external-metadata\":\n      http:\n        url: http://my-external-source?search={request.path}\n      metrics: true\n

    The above will enable the metrics auth_server_evaluator_duration_seconds (histogram) and auth_server_evaluator_total (counter) with labels namespace=\"my-ns\", authconfig=\"my-authconfig\", evaluator_type=\"METADATA_GENERIC_HTTP\" and evaluator_name=\"my-external-metadata\".

    The same pattern works for other types of evaluators. Find below the list of all types and corresponding label constant used in the metric:

    Evaluator type Metric's evaluator_type label authentication.apiKey IDENTITY_APIKEY authentication.kubernetesTokenReview IDENTITY_KUBERNETES authentication.jwt IDENTITY_OIDC authentication.oauth2Introspection IDENTITY_OAUTH2 authentication.x509 IDENTITY_MTLS authentication.plain IDENTITY_PLAIN authentication.anonymous IDENTITY_NOOP metadata.http METADATA_GENERIC_HTTP metadata.userInfo METADATA_USERINFO metadata.uma METADATA_UMA authorization.patternMatching AUTHORIZATION_JSON authorization.opa AUTHORIZATION_OPA authorization.kubernetesSubjectAccessReview AUTHORIZATION_KUBERNETES authorization.spicedb AUTHORIZATION_AUTHZED response.success..plain RESPONSE_PLAIN response.success..json RESPONSE_JSON response.success..wristband RESPONSE_WRISTBAND

    Metrics at the level of the evaluators can also be enforced to an entire Authorino instance, by setting the --deep-metrics-enabled command-line flag. In this case, regardless of the value of the field spec.(authentication|metadata|authorization|response).metrics in the AuthConfigs, individual metrics for all evaluators of all AuthConfigs will be exported.

    For more information about metrics exported by Authorino, see Observability.

    "},{"location":"authorino/docs/getting-started/","title":"Getting started","text":"

    This page covers requirements and instructions to deploy Authorino on a Kubernetes cluster, as well as the steps to declare, apply and try out a protection layer of authentication and authorization over your service, clean-up and complete uninstallation.

    If you prefer learning with an example, check out our Hello World.

    "},{"location":"authorino/docs/getting-started/#requirements","title":"Requirements","text":""},{"location":"authorino/docs/getting-started/#platform-requirements","title":"Platform requirements","text":"

    These are the platform requirements to use Authorino:

    • Kubernetes server (recommended v1.21 or later), with permission to create Kubernetes Custom Resource Definitions (CRDs) (for bootstrapping Authorino and Authorino Operator)

      Alternative: K8s distros and platforms

      Alternatively to upstream Kubernetes, you should be able to use any other Kubernetes distribution or Kubernetes Management Platform (KMP) with support for Kubernetes Custom Resources Definitions (CRD) and custom controllers, such as Red Hat OpenShift, IBM Cloud Kubernetes Service (IKS), Google Kubernetes Engine (GKE), Amazon Elastic Kubernetes Service (EKS) and Azure Kubernetes Service (AKS).

    • Envoy proxy (recommended v1.19 or later), to wire up Upstream services (i.e. the services to be protected with Authorino) and external authorization filter (Authorino) for integrations based on the reverse-proxy architecture - example

      Alternative: Non-reverse-proxy integration

      Technically, any client that implements Envoy's external authorization gRPC protocol should be compatible with Authorino. For integrations based on the reverse-proxy architecture nevertheless, we strongly recommend that you leverage Envoy alongside Authorino.

      "},{"location":"authorino/docs/getting-started/#feature-specific-requirements","title":"Feature-specific requirements","text":"

      A few examples are:

      • For OpenID Connect, make sure you have access to an identity provider (IdP) and an authority that can issue ID tokens (JWTs). Check out Keycloak which can solve both and connect to external identity sources and user federation like LDAP.

      • For Kubernetes authentication tokens, platform support for the TokenReview and SubjectAccessReview APIs of Kubernetes shall be required. In case you want to be able to request access tokens for clients running outside the cluster, you may also want to check out the requisites for using Kubernetes TokenRequest API (GA in v1.20).

      • For User-Managed Access (UMA) resource data, you will need a UMA-compliant server running as well. This can be an implementation of the UMA protocol by each upstream API itself or (more typically) an external server that knows about the resources. Again, Keycloak can be a good fit here as well. Just keep in mind that, whatever resource server you choose, changing-state actions commanded in the upstream APIs or other parties will have to be reflected in the resource server. Authorino will not do that for you.

      Check out the Feature specification page for more feature-specific requirements.

      "},{"location":"authorino/docs/getting-started/#installation","title":"Installation","text":""},{"location":"authorino/docs/getting-started/#step-install-the-authorino-operator","title":"Step: Install the Authorino Operator","text":"

      The simplest way to install the Authorino Operator is by applying the manifest bundle:

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n

      The above will install the latest build of the Authorino Operator and latest version of the manifests (CRDs and RBAC), which by default points as well to the latest build of Authorino, both based on the main branches of each component. To install a stable released version of the Operator and therefore also defaults to its latest compatible stable release of Authorino, replace main with another tag of a proper release of the Operator, e.g. 'v0.2.0'.

      This step will also install cert-manager in the cluster (required).

      Alternatively, you can deploy the Authorino Operator using the Operator Lifecycle Manager bundles. For instructions, check out Installing via OLM.

      "},{"location":"authorino/docs/getting-started/#step-request-an-authorino-instance","title":"Step: Request an Authorino instance","text":"

      Choose either cluster-wide or namespaced deployment mode and whether you want TLS termination enabled for the Authorino endpoints (gRPC authorization, raw HTTP authorization, and OIDC Festival Wristband Discovery listeners), and follow the corresponding instructions below.

      The instructions here are for centralized gateway or centralized authorization service architecture. Check out the Topologies section of the docs for alternatively running Authorino in a sidecar container.

      Cluster-wide (with TLS)

      Create the namespace:

      kubectl create namespace authorino\n

      Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secrets in the namespace):

      curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n

      Deploy Authorino:

      kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  replicas: 1\n  clusterWide: true\n  listener:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n

      Cluster-wide (without TLS)
      kubectl create namespace authorino\nkubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  image: quay.io/kuadrant/authorino:latest\n  replicas: 1\n  clusterWide: true\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      Namespaced (with TLS)

      Create the namespace:

      kubectl create namespace myapp\n

      Create the TLS certificates (requires cert-manager; skip if you already have certificates and certificate keys created and stored in Kubernetes Secrets in the namespace):

      curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/myapp/g\" | kubectl -n myapp apply -f -\n

      Deploy Authorino:

      kubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  image: quay.io/kuadrant/authorino:latest\n  replicas: 1\n  clusterWide: false\n  listener:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      enabled: true\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n

      Namespaced (without TLS)
      kubectl create namespace myapp\nkubectl -n myapp apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  image: quay.io/kuadrant/authorino:latest\n  replicas: 1\n  clusterWide: false\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/getting-started/#protect-a-service","title":"Protect a service","text":"

      The most typical integration to protect services with Authorino is by putting the service (upstream) behind a reverse-proxy or API gateway, enabled with an authorization filter that ensures all requests to the service are first checked with the authorization server (Authorino).

      To do that, make sure you have your upstream service deployed and running, usually in the same Kubernetes server where you installed Authorino. Then, setup an Envoy proxy and create an Authorino AuthConfig for your service.

      Authorino exposes 2 interfaces to serve the authorization requests:

      • a gRPC interface that implements Envoy's External Authorization protocol;
      • a raw HTTP authorization interface, suitable for using Authorino with Kubernetes ValidatingWebhook, for Envoy external authorization via HTTP, and other integrations (e.g. other proxies).

      To use Authorino as a simple satellite (sidecar) Policy Decision Point (PDP), applications can integrate directly via any of these interfaces. By integrating via a proxy or API gateway, the combination makes Authorino to perform as an external Policy Enforcement Point (PEP) completely decoupled from the application.

      "},{"location":"authorino/docs/getting-started/#life-cycle","title":"Life cycle","text":""},{"location":"authorino/docs/getting-started/#step-setup-envoy","title":"Step: Setup Envoy","text":"

      To configure Envoy for proxying requests targeting the upstream service and authorizing with Authorino, setup an Envoy configuration that enables Envoy's external authorization HTTP filter. Store the configuration in a ConfigMap.

      These are the important bits in the Envoy configuration to activate Authorino:

      static_resources:\n  listeners:\n\n  - address: {\u2026} # TCP socket address and port of the proxy\n    filter_chains:\n    - filters:\n      - name: envoy.http_connection_manager\n        typed_config:\n          \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n          route_config: {\u2026} # routing configs - virtual host domain and endpoint matching patterns and corresponding upstream services to redirect the traffic\n          http_filters:\n          - name: envoy.filters.http.ext_authz # the external authorization filter\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n              transport_api_version: V3\n              failure_mode_allow: false # ensures only authenticated and authorized traffic goes through\n              grpc_service:\n                envoy_grpc:\n                  cluster_name: authorino\n                timeout: 1s\n  clusters:\n  - name: authorino\n    connect_timeout: 0.25s\n    type: strict_dns\n    lb_policy: round_robin\n    http2_protocol_options: {}\n    load_assignment:\n      cluster_name: authorino\n      endpoints:\n      - lb_endpoints:\n        - endpoint:\n            address:\n              socket_address:\n                address: authorino-authorino-authorization # name of the Authorino service deployed \u2013 it can be the fully qualified name with `.<namespace>.svc.cluster.local` suffix (e.g. 
`authorino-authorino-authorization.myapp.svc.cluster.local`)\n                port_value: 50051\n    transport_socket: # in case TLS termination is enabled in Authorino; omit it otherwise\n      name: envoy.transport_sockets.tls\n      typed_config:\n        \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n        common_tls_context:\n          validation_context:\n            trusted_ca:\n              filename: /etc/ssl/certs/authorino-ca-cert.crt\n

      For a complete Envoy ConfigMap containing an upstream API protected with Authorino, with TLS enabled and option for rate limiting with Limitador, plus a webapp served under the same domain of the protected API, check out this example.

      After creating the ConfigMap with the Envoy configuration, create an Envoy Deployment and Service. E.g.:

      kubectl -n myapp apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: envoy\n  labels:\n    app: envoy\nspec:\n  selector:\n    matchLabels:\n      app: envoy\n  template:\n    metadata:\n      labels:\n        app: envoy\n    spec:\n      containers:\n\n        - name: envoy\n          image: envoyproxy/envoy:v1.19-latest\n          command: [\"/usr/local/bin/envoy\"]\n          args:\n            - --config-path /usr/local/etc/envoy/envoy.yaml\n            - --service-cluster front-proxy\n            - --log-level info\n            - --component-log-level filter:trace,http:debug,router:debug\n          ports:\n            - name: web\n              containerPort: 8000 # matches the address of the listener in the envoy config\n          volumeMounts:\n            - name: config\n              mountPath: /usr/local/etc/envoy\n              readOnly: true\n            - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n              subPath: ca.crt\n              mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n              readOnly: true\n      volumes:\n        - name: config\n          configMap:\n            name: envoy\n            items:\n              - key: envoy.yaml\n                path: envoy.yaml\n        - name: authorino-ca-cert # in case TLS termination is enabled in Authorino; omit it otherwise\n          secret:\n            defaultMode: 420\n            secretName: authorino-ca-cert\n  replicas: 1\nEOF\n
      kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Service\nmetadata:\n  name: envoy\nspec:\n  selector:\n    app: envoy\n  ports:\n\n    - name: web\n      port: 8000\n      protocol: TCP\nEOF\n
      "},{"location":"authorino/docs/getting-started/#step-apply-an-authconfig","title":"Step: Apply an AuthConfig","text":"

      Check out the docs for a full description of Authorino's AuthConfig Custom Resource Definition (CRD) and its features.

      For examples based on specific use-cases, check out the User guides.

      For authentication based on OpenID Connect (OIDC) JSON Web Tokens (JWT), plus one simple JWT claim authorization check, a typical AuthConfig custom resource looks like the following:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: my-api-protection\nspec:\n  hosts: # any hosts that resolve to the envoy service and envoy routing config where the external authorization filter is enabled\n\n  - my-api.io # north-south traffic through a Kubernetes `Ingress` or OpenShift `Route`\n  - my-api.myapp.svc.cluster.local # east-west traffic (between applications within the cluster)\n  authentication:\n    \"idp-users\":\n      jwt:\n        issuerUrl: https://my-idp.com/auth/realm\n  authorization:\n    \"check-claim\":\n      patternMatching:\n        patterns:\n        - selector: auth.identity.group\n          operator: eq\n          value: allowed-users\nEOF\n

      After applying the AuthConfig, consumers of the protected service should be able to start sending requests.

      "},{"location":"authorino/docs/getting-started/#clean-up","title":"Clean-up","text":""},{"location":"authorino/docs/getting-started/#remove-protection","title":"Remove protection","text":"

      Delete the AuthConfig:

      kubectl -n myapp delete authconfig/my-api-protection\n

      Decommission the Authorino instance:

      kubectl -n myapp delete authorino/authorino\n
      "},{"location":"authorino/docs/getting-started/#uninstall","title":"Uninstall","text":"

      To completely remove Authorino CRDs, run from the Authorino Operator directory:

      make uninstall\n
      "},{"location":"authorino/docs/getting-started/#next-steps","title":"Next steps","text":"
      1. Read the docs. The Architecture page and the Features page are good starting points to learn more about how Authorino works and its functionalities.
      2. Check out the User guides for several examples of AuthConfigs based on specific use-cases
      "},{"location":"authorino/docs/terminology/","title":"Terminology","text":"

      Here we define some terms that are used in the project, with the goal of avoiding confusion and facilitating more accurate conversations related to Authorino.

      If you see terms used that are not here (or are used in place of terms here) please consider contributing a definition to this doc with a PR, or modifying the use elsewhere to align with these terms.

      "},{"location":"authorino/docs/terminology/#terms","title":"Terms","text":"

      Access token Type of temporary password (security token), tied to an authenticated identity, issued by an auth server as of request from either the identity subject itself or a registered auth client known by the auth server, and that delegates to a party powers to operate on behalf of that identity before a resource server; it can be formatted as an opaque data string or as an encoded JSON Web Token (JWT).

      Application Programming Interface (API) Interface that defines interactions between multiple software applications; (in HTTP communication) set of endpoints and specification to expose resources hosted by a resource server, to be consumed by client applications; the access facade of a resource server.

      Attribute-based Access Control (ABAC) Authorization model that grants/denies access to resources based on evaluation of authorization policies which combine attributes together (from claims, from the request, from the resource, etc).

      Auth Usually employed as a short for authentication and authorization together (AuthN/AuthZ).

      Auth client Application client (software) that uses an auth server, either in the process of authenticating and/or authorizing identity subjects (including self) who want to consume resources from a resources server or auth server.

      Auth server Server where auth clients, users, roles, scopes, resources, policies and permissions can be stored and managed.

      Authentication (AuthN) Process of verifying that a given credential belongs to a claimed-to-be identity; usually resulting in the issuing of an access token.

      Authorization (AuthZ) Process of granting (or denying) access over a resource to a party based on the set of authorization rules, policies and/or permissions enforced.

      Authorization header HTTP request header frequently used to carry credentials to authenticate a user in an HTTP communication, like in requests sent to an API; alternatives usually include credentials carried in another (custom) HTTP header, query string parameter or HTTP cookie.

      Capability Usually employed to refer to a management feature of a Kubernetes-native system, based on the definition and use of Kubernetes Custom Resources (CRDs and CRs), that enables that system to one of the following \u201ccapability levels\u201d: Basic Install, Seamless Upgrades, Full Lifecycle, Deep Insights, Auto Pilot.

      Claim Attribute packed in a security token which represents a claim that one who bears the token is making about an entity, usually an identity subject.

      Client ID Unique identifier of an auth client within an auth server domain (or auth server realm).

      Client secret Password presented by auth clients together with their Client IDs while authenticating with an auth server, either when requesting access tokens to be issued or when consuming services from the auth servers in general.

      Delegation Process of granting a party (usually an auth client) with powers to act, often with limited scope, on behalf of an identity, to access resources from a resource server. See also OAuth2.

      Hash-based Message Authentication Code (HMAC) Specific type of message authentication code (MAC) that involves a cryptographic hash function and a shared secret cryptographic key; it can be used to verify the authenticity of a message and therefore as an authentication method.

      Identity Set of properties that qualifies a subject as a strong identifiable entity (usually a user), who can be authenticated by an auth server. See also Claims.

      Identity and Access Management (IAM) system Auth system that implements and/or connects with sources of identity (IdP) and offers interfaces for managing access (authorization policies and permissions). See also Auth server.

      Identity Provider (IdP) Source of identity; it can be a feature of an auth server or external source connected to an auth server.

      ID token Special type of access token; an encoded JSON Web Token (JWT) that packs claims about an identity.

      JSON Web Token (JWT) JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties.

      JSON Web Signature (JWS) Standard for signing arbitrary data, especially JSON Web Tokens (JWT).

      JSON Web Key Set (JWKS) Set of keys containing the public keys used to verify any JSON Web Token (JWT).

      Keycloak Open source auth server to allow single sign-on with identity and access management.

      Lightweight Directory Access Protocol (LDAP) Open standard for distributed directory information services for sharing of information about users, systems, networks, services and applications.

      Mutual Transport Layer Security (mTLS) Protocol for the mutual authentication of client-server communication, i.e., the client authenticates the server and the server authenticates the client, based on the acceptance of the X.509 certificates of each party.

      OAuth 2.0 (OAuth2) Industry-standard protocol for delegation.

      OpenID Connect (OIDC) Simple identity verification (authentication) layer built on top of the OAuth2 protocol.

      Open Policy Agent (OPA) Authorization policy agent that enables the usage of declarative authorization policies written in Rego language.

      Opaque token Security token devoid of explicit meaning (e.g. random string); it requires the usage of a lookup mechanism to be translated into a meaningful set of claims representing an identity.

      Permission Association between a protected resource and the authorization policies that must be evaluated to determine whether access should be granted; e.g. <user|group|role> CAN DO <action> ON RESOURCE <X>.

      Policy Rule or condition (authorization policy) that must be satisfied to grant access to a resource; strongly related to the different access control mechanisms (ACMs) and strategies one can use to protect resources, e.g. attribute-based access control (ABAC), role-based access control (RBAC), context-based access control, user-based access control (UBAC).

      Policy Administration Point (PAP) Set of UIs and APIs to manage resource servers, resources, scopes, policies and permissions; it is where the auth system is configured.

      Policy Decision Point (PDP) Where the authorization requests are sent, with permissions being requested, and authorization policies are evaluated accordingly.

      Policy Enforcement Point (PEP) Where the authorization is effectively enforced, usually at the resource server or at a proxy, based on a response provided by the Policy Decision Point (PDP).

      Policy storage Where policies are stored and from where they can be fetched, perhaps to be cached.

      Red Hat SSO Auth server; downstream product created from the Keycloak Open Source project.

      Refresh token Special type of security token, often provided together with an access token in an OAuth2 flow, used to renew the duration of an access token before it expires; it requires client authentication.

      Requesting Party Token (RPT) JSON Web Token (JWT) digitally signed using JSON Web Signature (JWS), issued by the Keycloak auth server.

      Resource One or more endpoints of a system, API or server, that can be protected.

      Resource-level Access Control (RLAC) Authorization model that takes into consideration attributes of each specific request resource to grant/deny access to those resources (e.g. the resource's owner).

      Resource server Server that hosts protected resources.

      Role Aspect of a user\u2019s identity assigned to the user to indicate the level of access they should have to the system; essentially, roles represent collections of permissions.

      Role-based Access Control (RBAC) Authorization model that grants/denies access to resources based on the roles of authenticated users (rather than on complex attributes/policy rules).

      Scope Mechanism that defines the specific operations that applications can be allowed to do or information that they can request on an identity\u2019s behalf; often presented as a parameter when access is requested as a way to communicate what access is needed, and used by auth server to respond what actual access is granted.

      Single Page Application (SPA) Web application or website that interacts with the user by dynamically rewriting the current web page with new data from the web server.

      Single Sign-on (SSO) Authentication scheme that allows a user to log in with a single ID and password to any of several related, yet independent, software systems.

      Upstream (In the context of authentication/authorization) API whose endpoints must be protected by the auth system; the unprotected service in front of which a protection layer is added (by connecting with a Policy Decision Point).

      User-based Access Control (UBAC) Authorization model that grants/denies access to resources based on claims of the identity (attributes of the user).

      User-Managed Access (UMA) OAuth2-based access management protocol, used for users of an auth server to control the authorization process, i.e. directly granting/denying access to user-owned resources to other requesting parties.

      "},{"location":"authorino/docs/user-guides/","title":"User guides","text":"
      • Hello World The basics of protecting an API with Authorino.

      • Authentication with Kubernetes tokens (TokenReview API) Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.

      • Authentication with API keys Issue API keys stored in Kubernetes Secrets for clients to authenticate with your protected hosts.

      • Authentication with X.509 certificates and mTLS Verify client X.509 certificates against trusted root CAs.

      • OpenID Connect Discovery and authentication with JWTs Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).

      • OAuth 2.0 token introspection (RFC 7662) Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation in request-time.

      • Passing credentials (Authorization header, cookie headers and others) Customize where credentials are supplied in the request by each trusted source of identity.

      • HTTP \"Basic\" Authentication (RFC 7235) Turn Authorino API key Secrets settings into HTTP basic auth.

      • Anonymous access Bypass identity verification or fall back to anonymous access when credentials fail to validate

      • Token normalization Normalize identity claims from trusted sources and reduce complexity in your policies.

      • Edge Authentication Architecture (EAA) Exchange satellite (outer-layer) authentication tokens for \"Festival Wristbands\" accepted ubiquitously at the inside of your network. Normalize from multiple and varied sources of identity and authentication methods in the edge of your architecture; filter privacy data, limit the scope of permissions, and simplify authorization rules to your internal microservices.

      • Fetching auth metadata from external sources Get online data from remote HTTP services to enhance authorization rules.

      • OpenID Connect UserInfo Fetch user info for OpenID Connect ID tokens in request-time for extra metadata for your policies and online verification of token validity.

      • Resource-level authorization with User-Managed Access (UMA) resource registry Fetch resource attributes relevant for authorization from a User-Managed Access (UMA) resource registry such as Keycloak resource server clients.

      • Simple pattern-matching authorization policies Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.

      • OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.

      • Open Policy Agent (OPA) Rego policies Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.

      • Kubernetes RBAC for service authorization (SubjectAccessReview API) Manage permissions in the Kubernetes RBAC and let Authorino check them in request-time with the authorization system of the cluster.

      • Authorization with Keycloak Authorization Services Use Authorino as an adapter for Keycloak Authorization Services without importing any library or rebuilding your application code.

      • Integration with Authzed/SpiceDB Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.

      • Injecting data in the request Inject HTTP headers with serialized JSON content.

      • Authenticated rate limiting (with Envoy Dynamic Metadata) Provide Envoy with dynamic metadata from the external authorization process to be injected and used by consecutive filters, such as by a rate limiting service.

      • Redirecting to a login page Customize response status code and headers on failed requests. E.g. redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized; mask resources on access denied behind a 404 Not Found response instead of 403 Forbidden.

      • Mixing Envoy built-in filter for auth and Authorino Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in a AuthConfig.

      • Host override via context extension Induce the lookup of an AuthConfig by supplying extended host context, for use cases such as of path prefix-based lookup and wildcard subdomains lookup.

      • Using Authorino as ValidatingWebhook service Use Authorino as a generic Kubernetes ValidatingWebhook service where the rules to validate a request to the Kubernetes API are written in an AuthConfig.

      • Reducing the operational space: sharding, noise and multi-tenancy Have multiple instances of Authorino running in the same space (Kubernetes namespace or cluster-scoped), yet watching particular sets of resources.

      • Caching Cache auth objects resolved at runtime for any configuration bit of an AuthConfig, for easy access in subsequent requests whenever an arbitrary cache key repeats, until the cache entry expires.

      • Observability Prometheus metrics exported by Authorino, readiness probe, logging, tracing, etc.

      "},{"location":"authorino/docs/user-guides/anonymous-access/","title":"User guide: Anonymous access","text":"

      Bypass identity verification or fall back to anonymous access when credentials fail to validate

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 Anonymous access

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/anonymous-access/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/anonymous-access/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/anonymous-access/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/anonymous-access/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/anonymous-access/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"public\":\n      anonymous: {}\nEOF\n

      The example above enables anonymous access (i.e. removes authentication), without adding any extra layer of protection to the API. This is virtually equivalent to setting a top-level condition to the AuthConfig that always skips the configuration, or to switching authentication/authorization off completely in the route to the API.

      For more sophisticated use cases of anonymous access with Authorino, consider combining this feature with other identity sources in the AuthConfig while playing with the priorities of each source, as well as combination with when conditions, and/or adding authorization policies that either cover authentication or address anonymous access with proper rules (e.g. enforcing read-only access).

      Check out the docs for the Anonymous access feature for an example of an AuthConfig that falls back to anonymous access when a priority OIDC/JWT-based authentication fails, and enforces a read-only policy in such cases.

      "},{"location":"authorino/docs/user-guides/anonymous-access/#consume-the-api","title":"\u277b Consume the API","text":"
      curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/anonymous-access/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/api-key-authentication/","title":"User guide: Authentication with API keys","text":"

      Issue API keys stored in Kubernetes Secrets for clients to authenticate with your protected hosts.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 API key

      In Authorino, API keys are stored as Kubernetes Secrets. Each resource must contain an api_key entry with the value of the API key, and be labeled to match the selectors specified in spec.identity.apiKey.selector of the AuthConfig.

      API key Secrets must also include labels that match the secretLabelSelector field of the Authorino instance. See Resource reconciliation and status update for details.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/api-key-authentication/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/api-key-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\nEOF\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#create-an-api-key","title":"\u277b Create an API key","text":"
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

      With a valid API key:

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      With missing or invalid API key:

      curl -H 'Authorization: APIKEY invalid' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"friends\"\n# x-ext-auth-reason: the API Key provided is invalid\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#delete-an-api-key-revoke-access-to-the-api","title":"\u277d Delete an API key (revoke access to the API)","text":"
      kubectl delete secret/api-key-1\n
      "},{"location":"authorino/docs/user-guides/api-key-authentication/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/","title":"User guide: Authenticated rate limiting (with Envoy Dynamic Metadata)","text":"

      Provide Envoy with dynamic metadata about the external authorization process to be injected into the rate limiting filter.

      Authorino capabilities featured in this guide:
      • Dynamic response \u2192 Response wrappers \u2192 Envoy Dynamic Metadata
      • Dynamic response \u2192 JSON injection
      • Identity verification & authentication \u2192 API key

      Dynamic JSON objects built out of static values and values fetched from the Authorization JSON can be wrapped to be returned to the reverse-proxy as Envoy Well Known Dynamic Metadata content. Envoy can use those to inject data returned by the external authorization service into the other filters, such as the rate limiting filter.

      Check out as well the user guides about Injecting data in the request and Authentication with API keys.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

      At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-limitador","title":"\u2778 Deploy Limitador","text":"

      Limitador is a lightweight rate limiting service that can be used with Envoy.

      In this bundle, we will deploy Limitador pre-configured to limit requests to the talker-api domain up to 5 requests per interval of 60 seconds per user_id. Envoy will be configured to recognize the presence of Limitador and activate it on requests to the Talker API.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#deploy-the-talker-api","title":"\u2779 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#setup-envoy","title":"\u277a Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-an-authconfig","title":"\u277b Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      An annotation auth-data/username will be read from the Kubernetes API Key secret and passed as dynamic metadata { \"ext_auth_data\": { \"username\": \u00abannotations.auth-data/username\u00bb } }.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      dynamicMetadata:\n        \"rate-limit\":\n          json:\n            properties:\n              \"username\":\n                expression: auth.identity.metadata.annotations['auth-data/username']\n          key: ext_auth_data # how this bit of dynamic metadata from the ext authz service is named in the Envoy config\nEOF\n

      Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#create-the-api-keys","title":"\u277c Create the API keys","text":"

      For user John:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\n  annotations:\n    auth-data/username: john\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

      For user Jane:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-2\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\n  annotations:\n    auth-data/username: jane\nstringData:\n  api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#consume-the-api","title":"\u277d Consume the API","text":"

      As John:

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n

      Repeat the request a few more times within the 60-second time window, until the response status is 429 Too Many Requests.

      While the API is still limited to John, send requests as Jane:

      curl -H 'Authorization: APIKEY 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/authenticated-rate-limiting-envoy-dynamic-metadata/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/limitador/limitador-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/authzed/","title":"User guide: Integration with Authzed/SpiceDB","text":"

      Permission requests sent to a Google Zanzibar-based Authzed/SpiceDB instance, via gRPC.

      Authorino capabilities featured in this guide:
      • Authorization \u2192 SpiceDB
      • Identity verification & authentication \u2192 API key

      "},{"location":"authorino/docs/user-guides/authzed/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

      At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/authzed/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/authzed/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/authzed/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/authzed/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/authzed/#create-the-permission-database","title":"\u277a Create the permission database","text":"

      Create the namespace:

      kubectl create namespace spicedb\n

      Create the SpiceDB instance:

      kubectl -n spicedb apply -f -<<EOF\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: spicedb\n  labels:\n    app: spicedb\nspec:\n  selector:\n    matchLabels:\n      app: spicedb\n  template:\n    metadata:\n      labels:\n        app: spicedb\n    spec:\n      containers:\n\n      - name: spicedb\n        image: authzed/spicedb\n        args:\n        - serve\n        - \"--grpc-preshared-key\"\n        - secret\n        - \"--http-enabled\"\n        ports:\n        - containerPort: 50051\n        - containerPort: 8443\n  replicas: 1\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: spicedb\nspec:\n  selector:\n    app: spicedb\n  ports:\n    - name: grpc\n      port: 50051\n      protocol: TCP\n    - name: http\n      port: 8443\n      protocol: TCP\nEOF\n

      Forward local request to the SpiceDB service inside the cluster:

      kubectl -n spicedb port-forward service/spicedb 8443:8443 2>&1 >/dev/null &\n

      Create the permission schema:

      curl -X POST http://localhost:8443/v1/schema/write \\\n  -H 'Authorization: Bearer secret' \\\n  -H 'Content-Type: application/json' \\\n  -d @- << EOF\n{\n  \"schema\": \"definition blog/user {}\\ndefinition blog/post {\\n\\trelation reader: blog/user\\n\\trelation writer: blog/user\\n\\n\\tpermission read = reader + writer\\n\\tpermission write = writer\\n}\"\n}\nEOF\n

      Create the relationships:

      • blog/user:emilia \u2192 writer of blog/post:1
      • blog/user:beatrice \u2192 reader of blog/post:1
      curl -X POST http://localhost:8443/v1/relationships/write \\\n  -H 'Authorization: Bearer secret' \\\n  -H 'Content-Type: application/json' \\\n  -d @- << EOF\n{\n  \"updates\": [\n    {\n      \"operation\": \"OPERATION_CREATE\",\n      \"relationship\": {\n        \"resource\": {\n          \"objectType\": \"blog/post\",\n          \"objectId\": \"1\"\n        },\n        \"relation\": \"writer\",\n        \"subject\": {\n          \"object\": {\n            \"objectType\": \"blog/user\",\n            \"objectId\": \"emilia\"\n          }\n        }\n      }\n    },\n    {\n      \"operation\": \"OPERATION_CREATE\",\n      \"relationship\": {\n        \"resource\": {\n          \"objectType\": \"blog/post\",\n          \"objectId\": \"1\"\n        },\n        \"relation\": \"reader\",\n        \"subject\": {\n          \"object\": {\n            \"objectType\": \"blog/user\",\n            \"objectId\": \"beatrice\"\n          }\n        }\n      }\n    }\n  ]\n}\nEOF\n
      "},{"location":"authorino/docs/user-guides/authzed/#create-an-authconfig","title":"\u277b Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.

      Store the shared token for Authorino to authenticate with the SpiceDB instance in a Secret:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: spicedb\n  labels:\n    app: spicedb\nstringData:\n  grpc-preshared-key: secret\nEOF\n

      Create the AuthConfig:

      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"blog-users\":\n      apiKey:\n        selector:\n          matchLabels:\n            app: talker-api\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  authorization:\n    \"authzed-spicedb\":\n      spicedb:\n        endpoint: spicedb.spicedb.svc.cluster.local:50051\n        insecure: true\n        sharedSecretRef:\n          name: spicedb\n          key: grpc-preshared-key\n        subject:\n          kind:\n            value: blog/user\n          name:\n            selector: auth.identity.metadata.annotations.username\n        resource:\n          kind:\n            value: blog/post\n          name:\n            selector: context.request.http.path.@extract:{\"sep\":\"/\",\"pos\":2}\n        permission:\n          selector: context.request.http.method.@replace:{\"old\":\"GET\",\"new\":\"read\"}.@replace:{\"old\":\"POST\",\"new\":\"write\"}\nEOF\n
      "},{"location":"authorino/docs/user-guides/authzed/#create-the-api-keys","title":"\u277c Create the API keys","text":"

      For Emilia (writer):

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-writer\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: talker-api\n  annotations:\n    username: emilia\nstringData:\n  api_key: IAMEMILIA\nEOF\n

      For Beatrice (reader):

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-reader\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    app: talker-api\n  annotations:\n    username: beatrice\nstringData:\n  api_key: IAMBEATRICE\nEOF\n
      "},{"location":"authorino/docs/user-guides/authzed/#consume-the-api","title":"\u277d Consume the API","text":"

      As Emilia, send a GET request:

      curl -H 'Authorization: APIKEY IAMEMILIA' \\\n     -X GET \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n

      As Emilia, send a POST request:

      curl -H 'Authorization: APIKEY IAMEMILIA' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n

      As Beatrice, send a GET request:

      curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n     -X GET \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 200 OK\n

      As Beatrice, send a POST request:

      curl -H 'Authorization: APIKEY IAMBEATRICE' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/posts/1 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: PERMISSIONSHIP_NO_PERMISSION;token=GhUKEzE2NzU3MDE3MjAwMDAwMDAwMDA=\n
      "},{"location":"authorino/docs/user-guides/authzed/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-writer\nkubectl delete secret/api-key-reader\nkubectl delete secret/spicedb\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace spicedb\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/caching/","title":"User guide: Caching","text":"

      Cache auth objects resolved at runtime for any configuration bit of an AuthConfig (i.e. any evaluator), of any phase (identity, metadata, authorization and dynamic response), for easy access in subsequent requests, whenever an arbitrary (user-defined) cache key repeats, until the cache entry expires.

      This is particularly useful for configuration bits whose evaluation is significantly more expensive than accessing the cache. E.g.:

      • Caching of metadata fetched from external sources in general
      • Caching of previously validated identity access tokens (e.g. for OAuth2 opaque tokens that involve consuming the token introspection endpoint of an external auth server)
      • Caching of complex Rego policies that involve sending requests to external services

      Cases where one will NOT want to enable caching, because the evaluation is relatively cheap compared to accessing and managing the cache:

      • Validation of OIDC/JWT access tokens
      • OPA/Rego policies that do not involve external requests
      • JSON pattern-matching authorization
      • Dynamic JSON responses
      • Anonymous access
      Authorino capabilities featured in this guide:
      • Common feature \u2192 Caching
      • Identity verification & authentication \u2192 Anonymous access
      • External auth metadata \u2192 HTTP GET/GET-by-POST
      • Authorization \u2192 Open Policy Agent (OPA) Rego policies
      • Dynamic response \u2192 JSON injection

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/caching/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/caching/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/caching/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/caching/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/caching/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/caching/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      The example below enables caching for the external source of metadata, which in this case, for convenience, is the same upstream API protected by Authorino (i.e. the Talker API), though consumed directly by Authorino, without passing through the proxy. This API generates a uuid random hash that it injects in the JSON response. This value is different in every request processed by the API.

      The example also enables caching of returned OPA virtual documents. cached-authz is a trivial Rego policy that always grants access, but generates a timestamp, which Authorino will cache.

      In both cases, the path of the HTTP request is used as cache key. I.e., whenever the path repeats, Authorino reuses the values stored previously in each cache table (cached-metadata and cached-authz), respectively saving a request to the external source of metadata and the evaluation of the OPA policy. Cache entries will expire in both cases 60 seconds after they were stored in the cache.

      The cached values will be visible in the response returned by the Talker API in x-authz-data header injected by Authorino. This way, we can tell when an existing value in the cache was used and when a new one was generated and stored.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"anonymous\":\n      anonymous: {}\n  metadata:\n    \"cached-metadata\":\n      http:\n        url: \"http://talker-api.default.svc.cluster.local:3000/metadata/{context.request.http.path}\"\n      cache:\n        key:\n          selector: context.request.http.path\n        ttl: 60\n  authorization:\n    \"cached-authz\":\n      opa:\n        rego: |\n          now = time.now_ns()\n          allow = true\n        allValues: true\n      cache:\n        key:\n          selector: context.request.http.path\n        ttl: 60\n  response:\n    success:\n      headers:\n        \"x-authz-data\":\n          json:\n            properties:\n              \"cached-metadata\":\n                selector: auth.metadata.cached-metadata.uuid\n              \"cached-authz\":\n                selector: auth.authorization.cached-authz.now\nEOF\n
      "},{"location":"authorino/docs/user-guides/caching/#consume-the-api","title":"\u277b Consume the API","text":"
      1. To /hello
      curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\",\n# [\u2026]\n
      1. To a different path
      curl http://talker-api.127.0.0.1.nip.io:8000/goodbye\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343097860450300\\\",\\\"cached-metadata\\\":\\\"37fce386-1ee8-40a7-aed1-bf8a208f283c\\\"}\",\n# [\u2026]\n
      1. To /hello again before the cache entry expires (60 seconds from the first request sent to this path)
      curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343067462380300\\\",\\\"cached-metadata\\\":\\\"92c111cd-a10f-4e86-8bf0-e0cd646c6f79\\\"}\",  <=== same cache-id as before\n# [\u2026]\n
      1. To /hello again after the cache entry expires (60 seconds from the first request sent to this path)
      curl http://talker-api.127.0.0.1.nip.io:8000/hello\n# [\u2026]\n#  \"X-Authz-Data\": \"{\\\"cached-authz\\\":\\\"1649343135702743800\\\",\\\"cached-metadata\\\":\\\"e708a3a6-5caf-4028-ab5c-573ad9be7188\\\"}\",  <=== different cache-id\n# [\u2026]\n
      "},{"location":"authorino/docs/user-guides/caching/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/","title":"User guide: Redirecting to a login page","text":"

      Customize response status code and headers on failed requests to redirect users of a web application protected with Authorino to a login page instead of a 401 Unauthorized.

      Authorino capabilities featured in this guide:
      • Dynamic response \u2192 Custom denial status
      • Identity verification & authentication \u2192 API key
      • Identity verification & authentication \u2192 JWT verification

      Authorino's default response status codes, messages and headers for unauthenticated (401) and unauthorized (403) requests can be customized with static values and values fetched from the Authorization JSON.

      Check out as well the user guides about HTTP \"Basic\" Authentication (RFC 7235) and OpenID Connect Discovery and authentication with JWTs.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample web application called Matrix Quotes to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#deploy-the-matrix-quotes-web-application","title":"\u2778 Deploy the Matrix Quotes web application","text":"

      The Matrix Quotes is a static web application that contains quotes from the film The Matrix.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Matrix Quotes webapp behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/envoy-deploy.yaml\n

      The command above creates an Ingress with host name matrix-quotes.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: matrix-quotes-protection\nspec:\n  hosts:\n\n  - matrix-quotes.127.0.0.1.nip.io\n  authentication:\n    \"browser-users\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: users\n      credentials:\n        cookie:\n          name: TOKEN\n    \"http-basic-auth\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: users\n      credentials:\n        authorizationHeader:\n          prefix: Basic\n  response:\n    unauthenticated:\n      code: 302\n      headers:\n        \"Location\":\n          expression: |\n            'http://matrix-quotes.127.0.0.1.nip.io:8000/login.html?redirect_to=' + request.path\nEOF\n

      Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#create-an-api-key","title":"\u277b Create an API key","text":"
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: user-credential-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: users\nstringData:\n  api_key: am9objpw # john:p\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application","title":"\u277c Consume the application","text":"

      On a web browser, navigate to http://matrix-quotes.127.0.0.1.nip.io:8000.

      Click on the cards to read quotes from characters of the movie. You should be redirected to the login page.

      Log in using John's credentials:

      • Username: john
      • Password: p

      Click again on the cards and check that now you are able to access the inner pages.

      You can also consume a protected endpoint of the application using HTTP Basic Authentication:

      curl -u john:p http://matrix-quotes.127.0.0.1.nip.io:8000/neo.html\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#optional-modify-the-authconfig-to-authenticate-with-oidc","title":"\u277d (Optional) Modify the AuthConfig to authenticate with OIDC","text":""},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#setup-a-keycloak-server","title":"Setup a Keycloak server","text":"

      Deploy a Keycloak server preloaded with a realm named kuadrant:

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      Resolve local Keycloak domain so it can be accessed from the local host and inside the cluster with the name: (This will be needed to redirect to Keycloak's login page and at the same time validate issued tokens.)

      echo '127.0.0.1 keycloak' >> /etc/hosts\n

      Forward local requests to the instance of Keycloak running in the cluster:

      kubectl port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n

      Create a client:

      curl -H \"Authorization: Bearer $(curl http://keycloak:8080/realms/master/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=admin-cli' -d 'username=admin' -d 'password=p' | jq -r .access_token)\" \\\n     -H 'Content-type: application/json' \\\n     -d '{ \"name\": \"matrix-quotes\", \"clientId\": \"matrix-quotes\", \"publicClient\": true, \"redirectUris\": [\"http://matrix-quotes.127.0.0.1.nip.io:8000/auth*\"], \"enabled\": true }' \\\n     http://keycloak:8080/admin/realms/kuadrant/clients\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#reconfigure-the-matrix-quotes-app-to-use-keycloaks-login-page","title":"Reconfigure the Matrix Quotes app to use Keycloak's login page","text":"
      kubectl set env deployment/matrix-quotes KEYCLOAK_REALM=http://keycloak:8080/realms/kuadrant CLIENT_ID=matrix-quotes\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#apply-the-changes-to-the-authconfig","title":"Apply the changes to the AuthConfig","text":"
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: matrix-quotes-protection\nspec:\n  hosts:\n\n  - matrix-quotes.127.0.0.1.nip.io\n  authentication:\n    \"idp-users\":\n      jwt:\n        issuerUrl: http://keycloak:8080/realms/kuadrant\n      credentials:\n        cookie:\n          name: TOKEN\n  response:\n    unauthenticated:\n      code: 302\n      headers:\n        \"Location\":\n          expression: |\n            'http://keycloak:8080/realms/kuadrant/protocol/openid-connect/auth?client_id=matrix-quotes&redirect_uri=http://matrix-quotes.127.0.0.1.nip.io:8000/auth?redirect_to=' + request.path + '&scope=openid&response_type=code'\nEOF\n
      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#consume-the-application-again","title":"Consume the application again","text":"

      Refresh the browser window or navigate again to http://matrix-quotes.127.0.0.1.nip.io:8000.

      Click on the cards to read quotes from characters of the movie. You should be redirected to the login page, this time served by the Keycloak server.

      Log in as Jane (a user of the Keycloak realm):

      • Username: jane
      • Password: p

      Click again on the cards and check that now you are able to access the inner pages.

      "},{"location":"authorino/docs/user-guides/deny-with-redirect-to-login/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/user-credential-1\nkubectl delete authconfig/matrix-quotes-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/matrix-quotes/matrix-quotes-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/","title":"User guide: Edge Authentication Architecture (EAA)","text":"

      Edge Authentication Architecture (EAA) is a pattern where, rather than merely extracting authentication logic and specifics from the application codebase into a proper authN/authZ layer, this responsibility is pushed to the edge of your cloud network, without violating the Zero Trust principle.

      The very definition of \"edge\" is subject to discussion, but the underlying idea is that clients (e.g. API clients, IoT devices, etc.) authenticate with a layer that, before moving traffic to inside the network:

      • understands the complexity of all the different methods of authentication supported;
      • sometimes some token normalization is involved;
      • eventually enforces some preliminary authorization policies; and
      • possibly filters data bits that are sensitive to privacy concerns (e.g. to comply with local legislation such as GDPR, CCPA, etc)

      As a minimum, EAA allows you to simplify authentication between applications and microservices inside the network, as well as to reduce authorization to domain-specific rules and policies, rather than having to deal with all the complexity of supporting all types of clients in every node.

      Authorino capabilities featured in this guide:
      • Dynamic response \u2192 Festival Wristband tokens
      • Identity verification & authentication \u2192 Identity extension
      • Identity verification & authentication \u2192 API key
      • Identity verification & authentication \u2192 JWT verification

      Festival Wristbands are OpenID Connect ID tokens (signed JWTs) issued by Authorino by the end of the Auth Pipeline, for authorized requests. It can be configured to include claims based on static values and values fetched from the Authorization JSON.

      Check out as well the user guides about Token normalization, Authentication with API keys and OpenID Connect Discovery and authentication with JWTs.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses
      • jwt, to inspect JWTs (optional)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino and configuring 2 environments of an architecture, edge and internal.

      The first environment is a facade for handling the first layer of authentication and exchanging any valid presented authentication token for a Festival Wristband token. In the second, we will deploy a sample service called Talker API that the authorization service will ensure to receive only authenticated traffic presented with a valid Festival Wristband.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u2779.

      At steps \u2779 and \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-namespaces","title":"\u2777 Create the namespaces","text":"

      For simplicity, this example will set up edge and internal nodes in different namespaces of the same Kubernetes cluster. Those will share a same single cluster-wide Authorino instance. In real-life scenarios, it does not have to be like that.

      kubectl create namespace authorino\nkubectl create namespace edge\nkubectl create namespace internal\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-authorino","title":"\u2778 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources cluster-wide2, with TLS disabled3.

      kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  clusterWide: true\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-edge","title":"\u2779 Setup the Edge","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy","title":"Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up external authorization with the Authorino instance.4

      kubectl -n edge apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-edge-deploy.yaml\n

      The command above creates an Ingress with host name edge.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 9000 to the Envoy service running inside the cluster:

      kubectl -n edge port-forward deployment/envoy 9000:9000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig","title":"Create the AuthConfig","text":"

      Create a required secret that will be used by Authorino to sign the Festival Wristband tokens:

      kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: wristband-signing-key\nstringData:\n  key.pem: |\n    -----BEGIN EC PRIVATE KEY-----\n    MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n    AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n    cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n    -----END EC PRIVATE KEY-----\ntype: Opaque\nEOF\n

      Create the config:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl -n edge apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: edge-auth\nspec:\n  hosts:\n\n  - edge.127.0.0.1.nip.io\n  authentication:\n    \"api-clients\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino.kuadrant.io/managed-by: authorino\n        allNamespaces: true\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n      overrides:\n        \"username\":\n          selector: auth.identity.metadata.annotations.authorino\\.kuadrant\\.io/username\n    \"idp-users\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n      defaults:\n        \"username\":\n          selector: auth.identity.preferred_username\n  response:\n    success:\n      dynamicMetadata:\n        \"wristband\":\n          wristband:\n            issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\n            customClaims:\n              \"username\":\n                selector: auth.identity.username\n            tokenDuration: 300\n            signingKeyRefs:\n            - name: wristband-signing-key\n              algorithm: ES256\nEOF\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-the-internal-workload","title":"\u277a Setup the internal workload","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#deploy-the-talker-api","title":"Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#setup-envoy_1","title":"Setup Envoy","text":"

      This other bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.

      kubectl -n internal apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/eaa/envoy-node-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl -n internal port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-the-authconfig_1","title":"Create the AuthConfig","text":"Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl -n internal apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"edge-authenticated\":\n      jwt:\n        issuerUrl: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\nEOF\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#create-an-api-key","title":"\u277b Create an API key","text":"
      kubectl -n edge apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n  annotations:\n    authorino.kuadrant.io/username: alice\n    authorino.kuadrant.io/email: alice@host\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#using-the-api-key-to-authenticate","title":"Using the API key to authenticate","text":"

      Authenticate at the edge:

      WRISTBAND_TOKEN=$(curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n

      Consume the API:

      curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n

      Try to consume the API with authentication token that is only accepted in the edge:

      curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"edge-authenticated\"\n# x-ext-auth-reason: credential not found\n

      (Optional) Inspect the wristband token and verify that it only contains restricted info to authenticate and authorize with internal apps.

      jwt decode $WRISTBAND_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# {\n#   \"exp\": 1638452051,\n#   \"iat\": 1638451751,\n#   \"iss\": \"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/edge/edge-auth/wristband\",\n#   \"sub\": \"02cb51ea0e1c9f3c0960197a2518c8eb4f47e1b9222a968ffc8d4c8e783e4d19\",\n#   \"username\": \"alice\"\n# }\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#authenticating-with-the-keycloak-server","title":"Authenticating with the Keycloak server","text":"

      Obtain an access token with the Keycloak server for Jane:

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

      ACCESS_TOKEN=$(kubectl -n edge run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      (Optional) Inspect the access token issued by Keycloak and verify how it contains more details about the identity than required to authenticate and authorize with internal apps.

      jwt decode $ACCESS_TOKEN\n# [...]\n#\n# Token claims\n# ------------\n# { [...]\n#   \"email\": \"jane@kuadrant.io\",\n#   \"email_verified\": true,\n#   \"exp\": 1638452220,\n#   \"family_name\": \"Smith\",\n#   \"given_name\": \"Jane\",\n#   \"iat\": 1638451920,\n#   \"iss\": \"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\",\n#   \"jti\": \"699f6e49-dea4-4f29-ae2a-929a3a18c94b\",\n#   \"name\": \"Jane Smith\",\n#   \"preferred_username\": \"jane\",\n#   \"realm_access\": {\n#     \"roles\": [\n#       \"offline_access\",\n#       \"member\",\n#       \"admin\",\n#       \"uma_authorization\"\n#     ]\n#   },\n# [...]\n

      As Jane, obtain a limited wristband token at the edge:

      WRISTBAND_TOKEN=$(curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://edge.127.0.0.1.nip.io:9000/auth -is | tr -d '\\r' | sed -En 's/^x-wristband-token: (.*)/\\1/p')\n

      Consume the API:

      curl -H \"Authorization: Bearer $WRISTBAND_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/edge-authentication-architecture-festival-wristbands/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete namespace edge\nkubectl delete namespace internal\nkubectl delete namespace authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino and Authorino Operator manifests, run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/","title":"User guide: Mixing Envoy built-in filter for auth and Authorino","text":"

      Have JWT validation handled by Envoy beforehand and the JWT payload injected into the request to Authorino, to be used in custom authorization policies defined in a AuthConfig.

      In this user guide, we will set up Envoy and Authorino to protect a service called the Talker API service, with JWT authentication handled in Envoy and a more complex authorization policy enforced in Authorino.

      The policy defines a geo-fence by which only requests originated in Great Britain (country code: GB) will be accepted, unless the user is bound to a role called 'admin' in the auth server, in which case no geofence is enforced.

      All requests to the Talker API will be authenticated in Envoy. However, requests to /global will not trigger the external authorization.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 Plain
      • External auth metadata \u2192 HTTP GET/GET-by-POST
      • Authorization \u2192 Pattern-matching authorization
      • Dynamic response \u2192 Custom denial status

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

      At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  labels:\n    app: authorino\n  name: envoy\ndata:\n  envoy.yaml: |\n    static_resources:\n      clusters:\n\n      - name: talker-api\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        load_assignment:\n          cluster_name: talker-api\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: talker-api\n                    port_value: 3000\n      - name: keycloak\n        connect_timeout: 0.25s\n        type: logical_dns\n        lb_policy: round_robin\n        load_assignment:\n          cluster_name: keycloak\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: keycloak.keycloak.svc.cluster.local\n                    port_value: 8080\n      - name: authorino\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        http2_protocol_options: {}\n        load_assignment:\n          cluster_name: authorino\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: authorino-authorino-authorization\n                    port_value: 50051\n      listeners:\n      - address:\n          socket_address:\n            address: 0.0.0.0\n            port_value: 8000\n        filter_chains:\n        - filters:\n          - name: envoy.http_connection_manager\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n              stat_prefix: local\n              route_config:\n                name: local_route\n                virtual_hosts:\n                - name: local_service\n                 
 domains: ['*']\n                  routes:\n                  - match: { path_separated_prefix: /global }\n                    route: { cluster: talker-api }\n                    typed_per_filter_config:\n                      envoy.filters.http.ext_authz:\n                        \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n                        disabled: true\n                  - match: { prefix: / }\n                    route: { cluster: talker-api }\n              http_filters:\n              - name: envoy.filters.http.jwt_authn\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.jwt_authn.v3.JwtAuthentication\n                  providers:\n                    keycloak:\n                      issuer: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n                      remote_jwks:\n                        http_uri:\n                          uri: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/certs\n                          cluster: keycloak\n                          timeout: 5s\n                        cache_duration:\n                          seconds: 300\n                      payload_in_metadata: verified_jwt\n                  rules:\n                  - match: { prefix: / }\n                    requires: { provider_name: keycloak }\n              - name: envoy.filters.http.ext_authz\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n                  transport_api_version: V3\n                  failure_mode_allow: false\n                  metadata_context_namespaces:\n                  - envoy.filters.http.jwt_authn\n                  grpc_service:\n                    envoy_grpc:\n                      cluster_name: authorino\n                    timeout: 1s\n              - name: 
envoy.filters.http.router\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router\n              use_remote_address: true\n    admin:\n      access_log_path: \"/tmp/admin_access.log\"\n      address:\n        socket_address:\n          address: 0.0.0.0\n          port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: authorino\n    svc: envoy\n  name: envoy\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: authorino\n      svc: envoy\n  template:\n    metadata:\n      labels:\n        app: authorino\n        svc: envoy\n    spec:\n      containers:\n      - args:\n        - --config-path /usr/local/etc/envoy/envoy.yaml\n        - --service-cluster front-proxy\n        - --log-level info\n        - --component-log-level filter:trace,http:debug,router:debug\n        command:\n        - /usr/local/bin/envoy\n        image: envoyproxy/envoy:v1.22-latest\n        name: envoy\n        ports:\n        - containerPort: 8000\n          name: web\n        - containerPort: 8001\n          name: admin\n        volumeMounts:\n        - mountPath: /usr/local/etc/envoy\n          name: config\n          readOnly: true\n      volumes:\n      - configMap:\n          items:\n          - key: envoy.yaml\n            path: envoy.yaml\n          name: envoy\n        name: config\n---\napiVersion: v1\nkind: Service\nmetadata:\n  labels:\n    app: authorino\n  name: envoy\nspec:\n  ports:\n  - name: web\n    port: 8000\n    protocol: TCP\n  selector:\n    app: authorino\n    svc: envoy\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-wildcard-host\nspec:\n  rules:\n  - host: talker-api.127.0.0.1.nip.io\n    http:\n      paths:\n      - backend:\n          service:\n            name: envoy\n            port:\n              number: 8000\n        path: /\n        pathType: Prefix\nEOF\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#deploy-the-ip-location-service","title":"\u277a Deploy the IP Location service","text":"

      The IP Location service is a simple service that resolves an IPv4 address into geo location info.

      kubectl apply -f https://raw.githubusercontent.com/Kuadrant/authorino-examples/main/ip-location/ip-location-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#create-an-authconfig","title":"\u277b Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"jwt\":\n      plain:\n        selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\n  metadata:\n    \"geoinfo\":\n      http:\n        url: 'http://ip-location.default.svc.cluster.local:3000/{context.request.http.headers.x-forwarded-for.@extract:{\"sep\":\",\"}}'\n        headers:\n          \"Accept\":\n            value: application/json\n      cache:\n        key:\n          selector: \"context.request.http.headers.x-forwarded-for.@extract:{\\\"sep\\\":\\\",\\\"}\"\n  authorization:\n    \"geofence\":\n      when:\n      - selector: auth.identity.realm_access.roles\n        operator: excl\n        value: admin\n      patternMatching:\n        patterns:\n        - selector: auth.metadata.geoinfo.country_iso_code\n          operator: eq\n          value: \"GB\"\n  response:\n    unauthorized:\n      message:\n        selector: \"The requested resource is not available in {auth.metadata.geoinfo.country_name}\"\nEOF\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-a-token-and-consume-the-api","title":"\u277c Obtain a token and consume the API","text":""},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"

      Obtain an access token with the Keycloak server for John:

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for the user John, a non-admin (member) user:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      As John, consume the API inside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n

      As John, consume the API outside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: The requested resource is not available in Italy\n

      As John, consume a path of the API that will cause Envoy to skip external authorization:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"

      Obtain an access token with the Keycloak server for Jane, an admin user:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      As Jane, consume the API inside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n

      As Jane, consume the API outside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000 -i\n# HTTP/1.1 200 OK\n

      As Jane, consume a path of the API that will cause Envoy to skip external authorization:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 109.69.200.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/global -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/envoy-jwt-authn-and-authorino/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete ingress/ingress-wildcard-host\nkubectl delete service/envoy\nkubectl delete deployment/envoy\nkubectl delete configmap/envoy\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/external-metadata/","title":"User guide: Fetching auth metadata from external sources","text":"

      Get online data from remote HTTP services to enhance authorization rules.

      Authorino capabilities featured in this guide:
      • External auth metadata \u2192 HTTP GET/GET-by-POST
      • Identity verification & authentication \u2192 API key
      • Authorization \u2192 Open Policy Agent (OPA) Rego policies

      You can configure Authorino to fetch additional metadata from external sources at request time, by sending either a GET or a POST request to an HTTP service. The service is expected to return JSON content which is appended to the Authorization JSON, thus becoming available for usage in other configs of the Auth Pipeline, such as in authorization policies or custom responses.

      URL, parameters and headers of the request to the external source of metadata can be configured, including with dynamic values. Authentication between Authorino and the service can be set as part of these configuration options, or based on a shared authentication token stored in a Kubernetes Secret.

      Check out as well the user guides about Authentication with API keys and Open Policy Agent (OPA) Rego policies.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/external-metadata/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/external-metadata/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/external-metadata/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/external-metadata/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/external-metadata/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/external-metadata/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      In this example, we will implement a geofence policy for the API, using OPA and metadata fetching from an external service that returns geolocalization JSON data for a given IP address. The policy establishes that only GET requests are allowed and the path of the request should be in the form /{country-code}/*, where {country-code} is the 2-character code of the country where the client is identified as being physically present.

      The implementation relies on the X-Forwarded-For HTTP header to read the client's IP address.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  metadata:\n    \"geo\":\n      http:\n        urlExpression: |\n          'http://ip-api.com/json/' + request.headers['x-forwarded-for'].split(',')[0] + '?fields=countryCode'\n        headers:\n          \"Accept\":\n            expression: '\"application/json\"'\n  authorization:\n    \"geofence\":\n      opa:\n        rego: |\n          import input.context.request.http\n\n          allow {\n            http.method = \"GET\"\n            split(http.path, \"/\") = [_, requested_country, _]\n            lower(requested_country) == lower(object.get(input.auth.metadata.geo, \"countryCode\", \"\"))\n          }\nEOF\n

      Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

      "},{"location":"authorino/docs/user-guides/external-metadata/#create-an-api-key","title":"\u277b Create an API key","text":"
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/external-metadata/#consume-the-api","title":"\u277c Consume the API","text":"

      From an IP address assigned to the United Kingdom of Great Britain and Northern Ireland (country code GB):

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 200 OK\n
      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 79.123.45.67' \\\n     http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 403 Forbidden\n

      From an IP address assigned to Italy (country code IT):

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 109.112.34.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/gb/hello -i\n# HTTP/1.1 403 Forbidden\n
      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 109.112.34.56' \\\n     http://talker-api.127.0.0.1.nip.io:8000/it/hello -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/external-metadata/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/hello-world/","title":"User guide: Hello World","text":""},{"location":"authorino/docs/user-guides/hello-world/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant, you can skip step \u2778. You may already have Authorino installed and running as well. In this case, also skip step \u277a. If you additionally have your workload cluster configured, with the sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, go straight to step \u277c.

      At step \u277c, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/hello-world/#create-the-namespace","title":"\u2776 Create the namespace","text":"
      kubectl create namespace hello-world\n# namespace/hello-world created\n
      "},{"location":"authorino/docs/user-guides/hello-world/#deploy-the-talker-api","title":"\u2777 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n# deployment.apps/talker-api created\n# service/talker-api created\n
      "},{"location":"authorino/docs/user-guides/hello-world/#setup-envoy","title":"\u2778 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.1

      kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/envoy-deploy.yaml\n# configmap/envoy created\n# deployment.apps/envoy created\n# service/envoy created\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl -n hello-world port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-unprotected","title":"\u2779 Consume the API (unprotected)","text":"
      curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/hello-world/#protect-the-api","title":"\u277a Protect the API","text":""},{"location":"authorino/docs/user-guides/hello-world/#install-the-authorino-operator","title":"Install the Authorino Operator","text":"
      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/hello-world/#deploy-authorino","title":"Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service2 that watches for AuthConfig resources in the hello-world namespace3, with TLS disabled4.

      kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authorino.yaml\n# authorino.operator.authorino.kuadrant.io/authorino created\n
      "},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-behind-envoy-and-authorino","title":"\u277b Consume the API behind Envoy and Authorino","text":"
      curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 404 Not Found\n# x-ext-auth-reason: Service not found\n

      Authorino does not know about the talker-api.127.0.0.1.nip.io host, hence the 404 Not Found. Let's teach Authorino about this host by applying an AuthConfig.

      "},{"location":"authorino/docs/user-guides/hello-world/#apply-the-authconfig","title":"\u277c Apply the AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl -n hello-world apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/hello-world/authconfig.yaml\n# authconfig.authorino.kuadrant.io/talker-api-protection created\n
      "},{"location":"authorino/docs/user-guides/hello-world/#consume-the-api-without-credentials","title":"\u277d Consume the API without credentials","text":"
      curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"api-clients\"\n# x-ext-auth-reason: credential not found\n
      "},{"location":"authorino/docs/user-guides/hello-world/#grant-access-to-the-api-with-a-tailor-made-security-scheme","title":"Grant access to the API with a tailor-made security scheme","text":"

      Check out other user guides for several use-cases of authentication and authorization, and the instructions to implement them using Authorino.

      A few examples of available user guides:

      • Authentication with API keys
      • Authentication with JWTs and OpenID Connect Discovery
      • Authentication with Kubernetes tokens (TokenReview API)
      • Authorization with Open Policy Agent (OPA) Rego policies
      • Authorization with simple JSON pattern-matching rules (e.g. JWT claims)
      • Authorization with Kubernetes RBAC (SubjectAccessReview API)
      • Fetching auth metadata from external sources
      • Token normalization
      "},{"location":"authorino/docs/user-guides/hello-world/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the namespaces created in step 1 and 5:

      kubectl delete namespace hello-world\nkubectl delete namespace authorino-operator\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      2. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      3. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      4. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/host-override/","title":"Host override via context extension","text":"

      By default, Authorino uses the host information of the HTTP request (Attributes.Http.Host) to look up an indexed AuthConfig to be enforced1. The host info can be overridden by supplying a host entry as a (per-route) context extension (Attributes.ContextExtensions), which takes precedence whenever present.

      Overriding the host attribute of the HTTP request can be useful to support use cases such as path prefix-based lookup and wildcard subdomain lookup.

      \u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant.

      In this guide:

      • Example of host override for path prefix-based lookup
      • Example of host override for wildcard subdomain lookup
      "},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-path-prefix-based-lookup","title":"Example of host override for path prefix-based lookup","text":"

      In this use case, 2 different APIs (i.e. Dogs API and Cats API) are served under the same base domain, and differentiated by the path prefix:

      • pets.com/dogs \u2192 Dogs API
      • pets.com/cats \u2192 Cats API

      Edit the Envoy config to extend the external authorization settings at the level of the routes, with the host value that will be favored by Authorino before the actual host attribute of the HTTP request:

      virtual_hosts:\n\n- name: pets-api\n  domains: ['pets.com']\n  routes:\n  - match:\n      prefix: /dogs\n    route:\n      cluster: dogs-api\n    typed_per_filter_config:\n      envoy.filters.http.ext_authz:\n        \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n        check_settings:\n          context_extensions:\n            host: dogs.pets.com\n  - match:\n      prefix: /cats\n    route:\n      cluster: cats-api\n    typed_per_filter_config:\n      envoy.filters.http.ext_authz:\n        \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n        check_settings:\n          context_extensions:\n            host: cats.pets.com\n

      Create the AuthConfig for the Pets API:

      apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: dogs-api-protection\nspec:\n  hosts:\n\n  - dogs.pets.com\n\n  authentication: [...]\n

      Create the AuthConfig for the Cats API:

      apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: cats-api-protection\nspec:\n  hosts:\n\n  - cats.pets.com\n\n  authentication: [...]\n

      Notice that the host subdomains dogs.pets.com and cats.pets.com are not really requested by the API consumers. Rather, users send requests to pets.com/dogs and pets.com/cats. When routing those requests, Envoy makes sure to inject the corresponding context extensions that will induce the right lookup in Authorino.

      "},{"location":"authorino/docs/user-guides/host-override/#example-of-host-override-for-wildcard-subdomain-lookup","title":"Example of host override for wildcard subdomain lookup","text":"

      In this use case, a single Pets API serves requests for any subdomain that matches *.pets.com, e.g.:

      • dogs.pets.com \u2192 Pets API
      • cats.pets.com \u2192 Pets API

      Edit the Envoy config to extend the external authorization settings at the level of the virtual host, with the host value that will be favored by Authorino before the actual host attribute of the HTTP request:

      virtual_hosts:\n\n- name: pets-api\n  domains: ['*.pets.com']\n  typed_per_filter_config:\n    envoy.filters.http.ext_authz:\n      \\\"@type\\\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthzPerRoute\n      check_settings:\n        context_extensions:\n          host: pets.com\n  routes:\n  - match:\n      prefix: /\n    route:\n      cluster: pets-api\n

      The host context extension used above is any key that matches one of the hosts listed in the targeted AuthConfig.

      Create the AuthConfig for the Pets API:

      apiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: pets-api-protection\nspec:\n  hosts:\n\n  - pets.com\n\n  authentication: [...]\n

      Notice that requests to dogs.pets.com and to cats.pets.com are all routed by Envoy to the same API, with the same external authorization configuration. In all cases, Authorino will lookup for the indexed AuthConfig associated with pets.com. The same is valid for a request sent, e.g., to birds.pets.com.

      1. For further details about Authorino lookup of AuthConfig, check out Host lookup.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/http-basic-authentication/","title":"User guide: HTTP \"Basic\" Authentication (RFC 7235)","text":"

      Turn Authorino API key Secrets settings into HTTP basic auth.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 API key
      • Authorization \u2192 Pattern-matching authorization

      HTTP \"Basic\" Authentication (RFC 7235) is not recommended if you can afford other more secure methods such as OpenID Connect. Nonetheless, to support legacy systems, it is sometimes necessary to implement it.

      In Authorino, HTTP \"Basic\" Authentication can be modeled leveraging the API key authentication feature (stored as Kubernetes Secrets with an api_key entry and labeled to match selectors specified in spec.identity.apiKey.selector of the AuthConfig).

      Check out as well the user guide about Authentication with API keys.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      The config uses API Key secrets to store base64-encoded username:password HTTP \"Basic\" authentication credentials. The config also specifies an Access Control List (ACL) by which only user john is authorized to consume the /bye endpoint of the API.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"http-basic-auth\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: users\n      credentials:\n        authorizationHeader:\n          prefix: Basic\n  authorization:\n    \"acl\":\n      when:\n      - predicate: request.path == '/bye'\n      patternMatching:\n        patterns:\n        - selector: context.request.http.headers.authorization.@extract:{\"pos\":1}|@base64:decode|@extract:{\"sep\":\":\"}\n          operator: eq\n          value: john\nEOF\n

      Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON. Check out as well the common feature Conditions about skipping parts of an AuthConfig in the auth pipeline based on context.

      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#create-user-credentials","title":"\u277b Create user credentials","text":"

      To create credentials for HTTP \"Basic\" Authentication, store each username:password, base64-encoded, in the api_key value of the Kubernetes Secret resources. E.g.:

      printf \"john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" | base64\n# am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA==\n

      Create credentials for user John:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: basic-auth-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: users\nstringData:\n  api_key: am9objpuZHlCenJlVXpGNHpxRFFzcVNQTUhrUmhyaUVPdGNSeA== # john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

      Create credentials for user Jane:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: basic-auth-2\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: users\nstringData:\n  api_key: amFuZTpkTnNScnNhcHkwbk5Dd210NTM3ZkhGcHl4MGNCc0xFcA== # jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

      As John (authorized in the ACL):

      curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
      curl -u john:ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx http://talker-api.127.0.0.1.nip.io:8000/bye\n# HTTP/1.1 200 OK\n

      As Jane (NOT authorized in the ACL):

      curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
      curl -u jane:dNsRrsapy0nNCwmt537fHFpyx0cBsLEp http://talker-api.127.0.0.1.nip.io:8000/bye -i\n# HTTP/1.1 403 Forbidden\n

      With an invalid user/password:

      curl -u unknown:invalid http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"http-basic-auth\"\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#revoke-access-to-the-api","title":"\u277d Revoke access to the API","text":"
      kubectl delete secret/basic-auth-1\n
      "},{"location":"authorino/docs/user-guides/http-basic-authentication/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/basic-auth-1\nkubectl delete secret/basic-auth-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/injecting-data/","title":"User guide: Injecting data in the request","text":"

      Inject HTTP headers with serialized JSON content.

      Authorino capabilities featured in this guide:
      • Dynamic response \u2192 JSON injection
      • Identity verification & authentication \u2192 API key

      Inject serialized custom JSON objects as HTTP request headers. Values can be static or fetched from the Authorization JSON.

      Check out as well the user guide about Authentication with API keys.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/injecting-data/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/injecting-data/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/injecting-data/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/injecting-data/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/injecting-data/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/injecting-data/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      The following defines a JSON object to be injected as an added HTTP header into the request, named after the response config x-ext-auth-data. The object includes 3 properties:

      1. a static value authorized: true;
      2. a dynamic value request-time, from Envoy-supplied contextual data present in the Authorization JSON; and
      3. a greeting message greeting-message that interpolates a dynamic value read from an annotation of the Kubernetes Secret resource that represents the API key used to authenticate into a static string.
      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  response:\n    success:\n      headers:\n        \"x-ext-auth-data\":\n          json:\n            properties:\n              \"authorized\":\n                expression: \"true\"\n              \"request-time\":\n                expression: request.time.seconds\n              \"greeting-message\":\n                expression: |\n                  'Hello, ' + auth.identity.metadata.annotations['auth-data/name']\nEOF\n

      Check out the docs about using Common Expression Language (CEL) for reading from the Authorization JSON.

      "},{"location":"authorino/docs/user-guides/injecting-data/#create-an-api-key","title":"\u277b Create an API key","text":"
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\n  annotations:\n    auth-data/name: Rita\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/injecting-data/#consume-the-api","title":"\u277c Consume the API","text":"
      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# {\n#   \"method\": \"GET\",\n#   \"path\": \"/hello\",\n#   \"query_string\": null,\n#   \"body\": \"\",\n#   \"headers\": {\n#     \u2026\n#     \"X-Ext-Auth-Data\": \"{\\\"authorized\\\":true,\\\"greeting-message\\\":\\\"Hello, Rita!\\\",\\\"request-time\\\":1637954644}\",\n#   },\n#   \u2026\n# }\n
      "},{"location":"authorino/docs/user-guides/injecting-data/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/","title":"User guide: Simple pattern-matching authorization policies","text":"

      Write simple authorization rules based on JSON patterns matched against Authorino's Authorization JSON; check contextual information of the request, validate JWT claims, cross metadata fetched from external sources, etc.

      Authorino capabilities featured in this guide:
      • Authorization \u2192 Pattern-matching authorization
      • Identity verification & authentication \u2192 JWT verification

      Authorino provides a built-in authorization module to check simple pattern-matching rules against the Authorization JSON. This is an alternative to OPA when all you want is to check for some simple rules, without complex logics, such as match the value of a JWT claim.

      Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      The email-verified-only authorization policy ensures that users consuming the API from a given network (IP range 192.168.1/24) must have their emails verified.

      The email_verified claim is a property of the identity added to the JWT by the OpenID Connect issuer.

      The implementation relies on the X-Forwarded-For HTTP header to read the client's IP address.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  authorization:\n    \"email-verified-only\":\n      when:\n      - predicate: |\n          request.headers['x-forwarded-for'].split(',')[0].matches(\"^192\\\\\\.168\\\\\\.1\\\\\\.\\\\\\d+$\")\n      patternMatching:\n        patterns:\n        - predicate: auth.identity.email_verified\nEOF\n

      Check out the doc about using Common Expression Language (CEL) for reading from the Authorization JSON. Check out as well the common feature Conditions about skipping parts of an AuthConfig in the auth pipeline based on context.

      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-jane-email-verified","title":"Obtain an access token and consume the API as Jane (email verified)","text":"

      Obtain an access token with the Keycloak server for Jane:

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      As Jane, consume the API outside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      As Jane, consume the API inside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#obtain-an-access-token-and-consume-the-api-as-peter-email-not-verified","title":"Obtain an access token and consume the API as Peter (email NOT verified)","text":"

      Obtain an access token with the Keycloak server for Peter:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      As Peter, consume the API outside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      As Peter, consume the API inside the area where the policy applies:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
      "},{"location":"authorino/docs/user-guides/json-pattern-matching-authorization/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete namespace keycloak\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/","title":"User guide: Authorization with Keycloak Authorization Services","text":"

      Keycloak provides a powerful set of tools (REST endpoints and administrative UIs), also known as Keycloak Authorization Services, to manage and enforce authorization workflows for multiple access control mechanisms, including discretionary user access control and user-managed permissions.

      This user guide is an example of how to use Authorino as an adapter to Keycloak Authorization Services while still relying on the reverse-proxy integration pattern, thus not involving importing an authorization library nor rebuilding the application's code.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 JWT verification
      • Authorization \u2192 Open Policy Agent (OPA) Rego policies

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Keycloak server
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      Forward local requests to Keycloak running inside the cluster (if using Kind):

      kubectl -n keycloak port-forward deployment/keycloak 8080:8080 2>&1 >/dev/null &\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      In this example, Authorino will accept access tokens (JWTs) issued by the Keycloak server. These JWTs can be either normal Keycloak ID tokens or Requesting Party Tokens (RPT).

      RPTs include claims about the permissions of the user regarding protected resources and scopes associated with a Keycloak authorization client that the user can access.

      When the supplied access token is an RPT, Authorino will just validate whether the user's granted permissions present in the token include the requested resource ID (translated from the path) and scope (inferred from the HTTP method). If the token does not contain a permissions claim (i.e. it is not an RPT), Authorino will negotiate a User-Managed Access (UMA) ticket on behalf of the user and try to obtain an RPT on that UMA ticket.

      In cases of asynchronous user-managed permission control, the first request to the API using a normal Keycloak ID token is denied by Authorino. The user that owns the resource acknowledges the access request in the Keycloak UI. If access is granted, the new permissions will be reflected in subsequent RPTs obtained by Authorino on behalf of the requesting party.

      Whenever an RPT with proper permissions is obtained by Authorino, the RPT is supplied back to the API consumer, so it can be used in subsequent requests thus skipping new negotiations of UMA tickets.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  authorization:\n    \"uma\":\n      opa:\n        rego: |\n          pat := http.send({\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\": \"post\",\"headers\":{\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":\"grant_type=client_credentials\"}).body.access_token\n          resource_id := http.send({\"url\":concat(\"\",[\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/resource_set?uri=\",input.context.request.http.path]),\"method\":\"get\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat])}}).body[0]\n          scope := lower(input.context.request.http.method)\n          access_token := trim_prefix(input.context.request.http.headers.authorization, \"Bearer \")\n\n          default rpt = \"\"\n          rpt = access_token { object.get(input.auth.identity, \"authorization\", {}).permissions }\n          else = rpt_str {\n            ticket := http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/authz/protection/permission\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer \",pat]),\"Content-Type\":\"application/json\"},\"raw_body\":concat(\"\",[\"[{\\\"resource_id\\\":\\\"\",resource_id,\"\\\",\\\"resource_scopes\\\":[\\\"\",scope,\"\\\"]}]\"])}).body.ticket\n            rpt_str := object.get(http.send({\"url\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token\",\"method\":\"post\",\"headers\":{\"Authorization\":concat(\" \",[\"Bearer 
\",access_token]),\"Content-Type\":\"application/x-www-form-urlencoded\"},\"raw_body\":concat(\"\",[\"grant_type=urn:ietf:params:oauth:grant-type:uma-ticket&ticket=\",ticket,\"&submit_request=true\"])}).body, \"access_token\", \"\")\n          }\n\n          allow {\n            permissions := object.get(io.jwt.decode(rpt)[1], \"authorization\", { \"permissions\": [] }).permissions\n            permissions[i]\n            permissions[i].rsid = resource_id\n            permissions[i].scopes[_] = scope\n          }\n        allValues: true\n  response:\n    success:\n      headers:\n        \"x-keycloak\":\n          when:\n\n          - selector: auth.identity.authorization.permissions\n            operator: eq\n            value: \"\"\n          json:\n            properties:\n              \"rpt\":\n                selector: auth.authorization.uma.rpt\nEOF\n
      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for user Jane:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#consume-the-api","title":"\u277c Consume the API","text":"

      As Jane, try to send a GET request to the protected resource /greetings/1, owned by user John.

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n

      As John, log in to http://localhost:8080/realms/kuadrant/account in the web browser (username: john / password: p), and grant access to the resource greeting-1 for Jane. A pending permission request by Jane shall exist in the list of John's Resources.

      As Jane, try to consume the protected resource /greetings/1 again:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n#\n# {\u2026\n#   \"headers\": {\u2026\n#     \"X-Keycloak\": \"{\\\"rpt\\\":\\\"<RPT>\", \u2026\n

      Copy the RPT from the response and repeat the request now using the RPT to authenticate:

      curl -H \"Authorization: Bearer <RPT>\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/keycloak-authorization-services/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/","title":"User guide: Kubernetes RBAC for service authorization (SubjectAccessReview API)","text":"

      Manage permissions in the Kubernetes RBAC and let Authorino check them at request time with the authorization system of the cluster.

      Authorino capabilities featured in this guide:
      • Authorization \u2192 Kubernetes SubjectAccessReview
      • Identity verification & authentication \u2192 Kubernetes TokenReview

      Authorino can delegate authorization decision to the Kubernetes authorization system, allowing permissions to be stored and managed using the Kubernetes Role-Based Access Control (RBAC) for example. The feature is based on the SubjectAccessReview API and can be used for resourceAttributes (parameters defined in the AuthConfig) or nonResourceAttributes (inferring HTTP path and verb from the original request).

      Check out as well the user guide about Authentication with Kubernetes tokens (TokenReview API).

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create TokenRequests (to consume the protected service from outside the cluster)
      • jq

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      The AuthConfig below sets all Kubernetes service accounts as trusted users of the API, and relies on the Kubernetes RBAC to enforce authorization using Kubernetes SubjectAccessReview API for non-resource endpoints:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  - envoy.default.svc.cluster.local\n  authentication:\n    \"service-accounts\":\n      kubernetesTokenReview:\n        audiences: [\"https://kubernetes.default.svc.cluster.local\"]\n  authorization:\n    \"k8s-rbac\":\n      kubernetesSubjectAccessReview:\n        user:\n          selector: auth.identity.user.username\nEOF\n

      Check out the spec for the Authorino Kubernetes SubjectAccessReview authorization feature, for resource attributes permission checks where SubjectAccessReviews issued by Authorino are modeled in terms of common attributes of operations on Kubernetes resources (namespace, API group, kind, name, subresource, verb).

      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-roles-associated-with-endpoints-of-the-api","title":"\u277b Create roles associated with endpoints of the API","text":"

      Because the k8s-rbac policy defined in the AuthConfig in the previous step is for non-resource access review requests, the corresponding roles and role bindings have to be defined at cluster scope.

      Create a talker-api-greeter role whose users and service accounts bound to this role can consume the non-resource endpoints POST /hello and POST /hi of the API:

      kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: talker-api-greeter\nrules:\n\n- nonResourceURLs: [\"/hello\"]\n  verbs: [\"post\"]\n- nonResourceURLs: [\"/hi\"]\n  verbs: [\"post\"]\nEOF\n

      Create a talker-api-speaker role whose users and service accounts bound to this role can consume the non-resource endpoints POST /say/* of the API:

      kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: talker-api-speaker\nrules:\n\n- nonResourceURLs: [\"/say/*\"]\n  verbs: [\"post\"]\nEOF\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#create-the-serviceaccounts-and-permissions-to-consume-the-api","title":"\u277c Create the ServiceAccounts and permissions to consume the API","text":"

      Create service accounts api-consumer-1 and api-consumer-2:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: api-consumer-1\nEOF\n
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: api-consumer-2\nEOF\n

      Bind both service accounts to the talker-api-greeter role:

      kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: talker-api-greeter-rolebinding\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: talker-api-greeter\nsubjects:\n\n- kind: ServiceAccount\n  name: api-consumer-1\n  namespace: default\n- kind: ServiceAccount\n  name: api-consumer-2\n  namespace: default\nEOF\n

      Bind service account api-consumer-1 to the talker-api-speaker role:

      kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: talker-api-speaker-rolebinding\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: talker-api-speaker\nsubjects:\n\n- kind: ServiceAccount\n  name: api-consumer-1\n  namespace: default\nEOF\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#consume-the-api","title":"\u277d Consume the API","text":"

      Run a pod that consumes one of the greeting endpoints of the API from inside the cluster, as service account api-consumer-1, bound to the talker-api-greeter and talker-api-speaker cluster roles in the Kubernetes RBAC:

      kubectl run greeter --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n  \"apiVersion\": \"v1\",\n  \"spec\": {\n    \"containers\": [{\n      \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n      \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/hi\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n      \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n    }],\n    \"serviceAccountName\": \"api-consumer-1\",\n    \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n  }\n}' -- sh\n# Sending...\n# 200\n

      Run a pod that sends a POST request to /say/blah from within the cluster, as service account api-consumer-1:

      kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n  \"apiVersion\": \"v1\",\n  \"spec\": {\n    \"containers\": [{\n      \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n      \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n      \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n    }],\n    \"serviceAccountName\": \"api-consumer-1\",\n    \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n  }\n}' -- sh\n# Sending...\n# 200\n

      Run a pod that sends a POST request to /say/blah from within the cluster, as service account api-consumer-2, bound only to the talker-api-greeter cluster role in the Kubernetes RBAC:

      kubectl run speaker --attach --rm --restart=Never -q --image=quay.io/kuadrant/authorino-examples:api-consumer --overrides='{\n  \"apiVersion\": \"v1\",\n  \"spec\": {\n    \"containers\": [{\n      \"name\": \"api-consumer\", \"image\": \"quay.io/kuadrant/authorino-examples:api-consumer\", \"command\": [\"./run\"],\n      \"args\":[\"--endpoint=http://envoy.default.svc.cluster.local:8000/say/blah\",\"--method=POST\",\"--interval=0\",\"--token-path=/var/run/secrets/tokens/api-token\"],\n      \"volumeMounts\": [{\"mountPath\": \"/var/run/secrets/tokens\",\"name\": \"access-token\"}]\n    }],\n    \"serviceAccountName\": \"api-consumer-2\",\n    \"volumes\": [{\"name\": \"access-token\",\"projected\": {\"sources\": [{\"serviceAccountToken\": {\"path\": \"api-token\",\"expirationSeconds\": 7200}}]}}]\n  }\n}' -- sh\n# Sending...\n# 403\n
      Extra: consume the API as service account api-consumer-2 from outside the cluster

      Obtain a short-lived access token for service account api-consumer-2, bound to the talker-api-greeter cluster role in the Kubernetes RBAC, using the Kubernetes TokenRequest API:

      export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-2/token -f - | jq -r .status.token)\n

      Consume the API as api-consumer-2 from outside the cluster:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X POST http://talker-api.127.0.0.1.nip.io:8000/say/something -i\n# HTTP/1.1 403 Forbidden\n
      "},{"location":"authorino/docs/user-guides/kubernetes-subjectaccessreview/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete serviceaccount/api-consumer-1\nkubectl delete serviceaccount/api-consumer-2\nkubectl delete clusterrolebinding/talker-api-greeter-rolebinding\nkubectl delete clusterrolebinding/talker-api-speaker-rolebinding\nkubectl delete clusterrole/talker-api-greeter\nkubectl delete clusterrole/talker-api-speaker\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/","title":"User guide: Authentication with Kubernetes tokens (TokenReview API)","text":"

      Validate Kubernetes Service Account tokens to authenticate requests to your protected hosts.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 Kubernetes TokenReview

      Authorino can verify Kubernetes-valid access tokens (using Kubernetes TokenReview API).

      These tokens can be either ServiceAccount tokens or any valid user access tokens issued to users of the Kubernetes server API.

      The audiences claim of the token must include the requested host and port of the protected API (default), or all audiences specified in spec.identity.kubernetes.audiences of the AuthConfig.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC) and to create TokenRequests (to consume the protected service from outside the cluster)
      • jq

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  - envoy.default.svc.cluster.local\n  authentication:\n    \"authorized-service-accounts\":\n      kubernetesTokenReview:\n        audiences:\n        - talker-api\nEOF\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-protected-by-authorino","title":"\u277b Consume the API protected by Authorino","text":""},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#create-a-serviceaccount","title":"Create a ServiceAccount","text":"

      Create a Kubernetes ServiceAccount to identify the consumer application that will send requests to the protected API:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: api-consumer-1\nEOF\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-outside-the-cluster","title":"Consume the API from outside the cluster","text":"

      Obtain a short-lived access token for the api-consumer-1 service account:

      export ACCESS_TOKEN=$(echo '{ \"apiVersion\": \"authentication.k8s.io/v1\", \"kind\": \"TokenRequest\", \"spec\": { \"audiences\": [\"talker-api\"], \"expirationSeconds\": 600 } }' | kubectl create --raw /api/v1/namespaces/default/serviceaccounts/api-consumer-1/token -f - | jq -r .status.token)\n

      Consume the API with a valid Kubernetes token:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n

      Consume the API with the Kubernetes token expired (10 minutes):

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"authorized-service-accounts\"\n# x-ext-auth-reason: Not authenticated\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#consume-the-api-from-inside-the-cluster","title":"Consume the API from inside the cluster","text":"

      Deploy an application that consumes an endpoint of the Talker API, in a loop, every 10 seconds. The application uses a short-lived service account token mounted inside the container using Kubernetes Service Account Token Volume Projection to authenticate.

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Pod\nmetadata:\n  name: api-consumer\nspec:\n  containers:\n\n  - name: api-consumer\n    image: quay.io/kuadrant/authorino-examples:api-consumer\n    command: [\"./run\"]\n    args:\n      - --endpoint=http://envoy.default.svc.cluster.local:8000/hello\n      - --token-path=/var/run/secrets/tokens/api-token\n      - --interval=10\n    volumeMounts:\n    - mountPath: /var/run/secrets/tokens\n      name: talker-api-access-token\n  serviceAccountName: api-consumer-1\n  volumes:\n  - name: talker-api-access-token\n    projected:\n      sources:\n      - serviceAccountToken:\n          path: api-token\n          expirationSeconds: 7200\n          audience: talker-api\nEOF\n

      Check the logs of api-consumer:

      kubectl logs -f api-consumer\n# Sending...\n# 200\n# 200\n# 200\n# 200\n# ...\n
      "},{"location":"authorino/docs/user-guides/kubernetes-tokenreview/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete pod/api-consumer\nkubectl delete serviceaccount/api-consumer-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/mtls-authentication/","title":"User guide: Authentication with X.509 certificates and Mutual Transport Layer Security (mTLS)","text":"

      Verify client X.509 certificates against trusted root CAs stored in Kubernetes Secrets to authenticate access to APIs protected with Authorino.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 X.509 client certificate authentication
      • Authorization \u2192 Pattern-matching authorization

      Authorino can verify x509 certificates presented by clients for authentication on the request to the protected APIs, at application level.

      Trusted root Certificate Authorities (CA) are stored as Kubernetes kubernetes.io/tls Secrets labeled according to selectors specified in the AuthConfig, watched and cached by Authorino.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/mtls-authentication/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277b.

      At step \u277b, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/mtls-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following commands will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS enabled3.

      Create the TLS certificates for the Authorino service:

      curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/default/g\" | kubectl apply -f -\n

      Request the Authorino instance:

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#create-a-ca","title":"\u2779 Create a CA","text":"

      Create a CA (Certificate Authority) certificate to issue the client certificates that will be used to authenticate clients that send requests to the Talker API:

      openssl req -x509 -sha512 -nodes \\\n  -days 365 \\\n  -newkey rsa:4096 \\\n  -subj \"/CN=talker-api-ca\" \\\n  -addext basicConstraints=CA:TRUE \\\n  -addext keyUsage=digitalSignature,keyCertSign \\\n  -keyout /tmp/ca.key \\\n  -out /tmp/ca.crt\n

      Store the CA cert in a Kubernetes Secret, labeled to be discovered by Authorino and to be mounted in the file system of the Envoy container:

      kubectl create secret tls talker-api-ca --cert=/tmp/ca.crt --key=/tmp/ca.key\nkubectl label secret talker-api-ca authorino.kuadrant.io/managed-by=authorino app=talker-api\n

      Prepare an extension file for the client certificate signing requests:

      cat > /tmp/x509v3.ext << EOF\nauthorityKeyIdentifier=keyid,issuer\nbasicConstraints=CA:FALSE\nkeyUsage=digitalSignature,nonRepudiation,keyEncipherment,dataEncipherment\nextendedKeyUsage=clientAuth\nEOF\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#setup-envoy","title":"\u277a Setup Envoy","text":"

      The following command deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  labels:\n    app: envoy\n  name: envoy\ndata:\n  envoy.yaml: |\n    static_resources:\n      listeners:\n\n      - address:\n          socket_address:\n            address: 0.0.0.0\n            port_value: 8443\n        filter_chains:\n        - transport_socket:\n            name: envoy.transport_sockets.tls\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext\n              common_tls_context:\n                tls_certificates:\n                - certificate_chain: {filename: \"/etc/ssl/certs/talker-api/tls.crt\"}\n                  private_key: {filename: \"/etc/ssl/certs/talker-api/tls.key\"}\n                validation_context:\n                  trusted_ca:\n                    filename: /etc/ssl/certs/talker-api/tls.crt\n          filters:\n          - name: envoy.http_connection_manager\n            typed_config:\n              \"@type\": type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager\n              stat_prefix: local\n              route_config:\n                name: local_route\n                virtual_hosts:\n                - name: local_service\n                  domains: ['*']\n                  routes:\n                  - match: { prefix: / }\n                    route: { cluster: talker-api }\n              http_filters:\n              - name: envoy.filters.http.ext_authz\n                typed_config:\n                  \"@type\": type.googleapis.com/envoy.extensions.filters.http.ext_authz.v3.ExtAuthz\n                  transport_api_version: V3\n                  failure_mode_allow: false\n                  include_peer_certificate: true\n                  grpc_service:\n                    envoy_grpc: { cluster_name: authorino }\n                    timeout: 1s\n              - name: envoy.filters.http.router\n                
typed_config: {}\n              use_remote_address: true\n      clusters:\n      - name: authorino\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        http2_protocol_options: {}\n        load_assignment:\n          cluster_name: authorino\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: authorino-authorino-authorization\n                    port_value: 50051\n        transport_socket:\n          name: envoy.transport_sockets.tls\n          typed_config:\n            \"@type\": type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext\n            common_tls_context:\n              validation_context:\n                trusted_ca:\n                  filename: /etc/ssl/certs/authorino-ca-cert.crt\n      - name: talker-api\n        connect_timeout: 0.25s\n        type: strict_dns\n        lb_policy: round_robin\n        load_assignment:\n          cluster_name: talker-api\n          endpoints:\n          - lb_endpoints:\n            - endpoint:\n                address:\n                  socket_address:\n                    address: talker-api\n                    port_value: 3000\n    admin:\n      access_log_path: \"/tmp/admin_access.log\"\n      address:\n        socket_address:\n          address: 0.0.0.0\n          port_value: 8001\n---\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  labels:\n    app: envoy\n  name: envoy\nspec:\n  selector:\n    matchLabels:\n      app: envoy\n  template:\n    metadata:\n      labels:\n        app: envoy\n    spec:\n      containers:\n      - args:\n        - --config-path /usr/local/etc/envoy/envoy.yaml\n        - --service-cluster front-proxy\n        - --log-level info\n        - --component-log-level filter:trace,http:debug,router:debug\n        command:\n        - /usr/local/bin/envoy\n        image: envoyproxy/envoy:v1.19-latest\n      
  name: envoy\n        ports:\n        - containerPort: 8443\n          name: web\n        - containerPort: 8001\n          name: admin\n        volumeMounts:\n        - mountPath: /usr/local/etc/envoy\n          name: config\n          readOnly: true\n        - mountPath: /etc/ssl/certs/authorino-ca-cert.crt\n          name: authorino-ca-cert\n          readOnly: true\n          subPath: ca.crt\n        - mountPath: /etc/ssl/certs/talker-api\n          name: talker-api-ca\n          readOnly: true\n      volumes:\n      - configMap:\n          items:\n          - key: envoy.yaml\n            path: envoy.yaml\n          name: envoy\n        name: config\n      - name: authorino-ca-cert\n        secret:\n          defaultMode: 420\n          secretName: authorino-ca-cert\n      - name: talker-api-ca\n        secret:\n          defaultMode: 420\n          secretName: talker-api-ca\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: envoy\nspec:\n  selector:\n    app: envoy\n  ports:\n  - name: web\n    port: 8443\n    protocol: TCP\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: ingress-wildcard-host\nspec:\n  rules:\n  - host: talker-api.127.0.0.1.nip.io\n    http:\n      paths:\n      - backend:\n          service:\n            name: envoy\n            port: { number: 8443 }\n        path: /\n        pathType: Prefix\nEOF\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8443 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8443:8443 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#create-the-authconfig","title":"\u277b Create the AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"mtls\":\n      x509:\n        selector:\n          matchLabels:\n            app: talker-api\n  authorization:\n    \"acme\":\n      patternMatching:\n        patterns:\n        - selector: auth.identity.Organization\n          operator: incl\n          value: ACME Inc.\nEOF\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

      With a TLS certificate signed by the trusted CA:

      openssl genrsa -out /tmp/aisha.key 4096\nopenssl req -new -subj \"/CN=aisha/C=PK/L=Islamabad/O=ACME Inc./OU=Engineering\" -key /tmp/aisha.key -out /tmp/aisha.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/aisha.csr -out /tmp/aisha.crt\n\ncurl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 200 OK\n

      With a TLS certificate signed by the trusted CA, though missing an authorized Organization:

      openssl genrsa -out /tmp/john.key 4096\nopenssl req -new -subj \"/CN=john/C=UK/L=London\" -key /tmp/john.key -out /tmp/john.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/ca.crt -CAkey /tmp/ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/john.csr -out /tmp/john.crt\n\ncurl -k --cert /tmp/john.crt --key /tmp/john.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#try-the-authconfig-via-raw-http-authorization-interface","title":"\u277d Try the AuthConfig via raw HTTP authorization interface","text":"

      Expose Authorino's raw HTTP authorization to the local host:

      kubectl port-forward service/authorino-authorino-authorization 5001:5001 2>&1 >/dev/null &\n

      With a TLS certificate signed by the trusted CA:

      curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 200\n

      With a TLS certificate signed by an unknown authority:

      openssl req -x509 -sha512 -nodes \\\n  -days 365 \\\n  -newkey rsa:4096 \\\n  -subj \"/CN=untrusted\" \\\n  -addext basicConstraints=CA:TRUE \\\n  -addext keyUsage=digitalSignature,keyCertSign \\\n  -keyout /tmp/untrusted-ca.key \\\n  -out /tmp/untrusted-ca.crt\n\nopenssl genrsa -out /tmp/niko.key 4096\nopenssl req -new -subj \"/CN=niko/C=JP/L=Osaka\" -key /tmp/niko.key -out /tmp/niko.csr\nopenssl x509 -req -sha512 -days 1 -CA /tmp/untrusted-ca.crt -CAkey /tmp/untrusted-ca.key -CAcreateserial -extfile /tmp/x509v3.ext -in /tmp/niko.csr -out /tmp/niko.crt\n\ncurl -k --cert /tmp/niko.crt --key /tmp/niko.key -H 'Content-Type: application/json' -d '{}' https://talker-api.127.0.0.1.nip.io:5001/check -i\n# HTTP/2 401\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#revoke-an-entire-chain-of-certificates","title":"\u277e Revoke an entire chain of certificates","text":"
      kubectl delete secret/talker-api-ca\n

      Even if the deleted root certificate is still cached and accepted at the gateway, Authorino will revoke access at application level immediately.

      Try with a previously accepted certificate:

      curl -k --cert /tmp/aisha.crt --key /tmp/aisha.key https://talker-api.127.0.0.1.nip.io:8443 -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Basic realm=\"mtls\"\n# x-ext-auth-reason: x509: certificate signed by unknown authority\n
      "},{"location":"authorino/docs/user-guides/mtls-authentication/#cleanup","title":"Cleanup","text":"
      kind delete cluster --name authorino-tutorial\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/","title":"User guide: OAuth 2.0 token introspection (RFC 7662)","text":"

      Introspect OAuth 2.0 access tokens (e.g. opaque tokens) for online user data and token validation at request time.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 OAuth 2.0 introspection
      • Authorization \u2192 Pattern-matching authorization

      Authorino can perform OAuth 2.0 token introspection (RFC 7662) on the access tokens supplied in the requests to protected APIs. This is particularly useful when using opaque tokens, for remotely checking the token's validity and resolving the identity object.

      Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.

      Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • OAuth 2.0 server that implements the token introspection endpoint (RFC 7662) (e.g. Keycloak or a12n-server)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy a Keycloak server preloaded with the realm settings required for this guide:

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      Deploy an a12n-server server preloaded with all settings required for this guide:

      kubectl create namespace a12n-server\nkubectl -n a12n-server apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/a12n-server/a12n-server-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create the required secrets that will be used by Authorino to authenticate with Keycloak and a12n-server during the introspection request:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: oauth2-token-introspection-credentials-keycloak\nstringData:\n  clientID: talker-api\n  clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\n---\napiVersion: v1\nkind: Secret\nmetadata:\n  name: oauth2-token-introspection-credentials-a12n-server\nstringData:\n  clientID: talker-api\n  clientSecret: V6g-2Eq2ALB1_WHAswzoeZofJ_e86RI4tdjClDDDb4g\ntype: Opaque\nEOF\n

      Create the Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak\":\n      oauth2Introspection:\n        endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token/introspect\n        tokenTypeHint: requesting_party_token\n        credentialsRef:\n          name: oauth2-token-introspection-credentials-keycloak\n    \"a12n-server\":\n      oauth2Introspection:\n        endpoint: http://a12n-server.a12n-server.svc.cluster.local:8531/introspect\n        credentialsRef:\n          name: oauth2-token-introspection-credentials-a12n-server\n  authorization:\n    \"can-read\":\n      when:\n      - selector: auth.identity.privileges\n        operator: neq\n        value: \"\"\n      patternMatching:\n        patterns:\n        - selector: auth.identity.privileges.talker-api\n          operator: incl\n          value: read\nEOF\n

      On every request, Authorino will try to verify the token remotely with the Keycloak server and the a12n-server server.

      For authorization, whenever the introspected token data includes a privileges property (returned by a12n-server), Authorino will enforce only consumers whose privileges.talker-api includes the \"read\" permission are granted access.

      Check out the docs for information about the common feature Conditions about skipping parts of an AuthConfig in the auth pipeline based on context.

      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-keycloak-and-consume-the-api","title":"Obtain an access token with Keycloak and consume the API","text":"

      Obtain an access token with the Keycloak server for user Jane:

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs matches always the host used to request the token and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

      export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      As user Jane, consume the API:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      Revoke the access token and try to consume the API again:

      kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#obtain-an-access-token-with-a12n-server-and-consume-the-api","title":"Obtain an access token with a12n-server and consume the API","text":"

      Forward local requests to a12n-server instance running in the cluster:

      kubectl -n a12n-server port-forward deployment/a12n-server 8531:8531 2>&1 >/dev/null &\n

      Obtain an access token with the a12n-server server for service account service-account-1:

      ACCESS_TOKEN=$(curl -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/token\" | jq -r .access_token)\n

      You can as well obtain an access token from within the cluster, in case your a12n-server is not reachable from the outside:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://a12n-server.a12n-server.svc.cluster.local:8531/token -s -d 'grant_type=client_credentials' -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s | jq -r .access_token)\n

      Verify the issued token is an opaque access token in this case:

      echo $ACCESS_TOKEN\n

      As service-account-1, consumer the API with a valid access token:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      Revoke the access token and try to consume the API again:

      curl -d \"token=$ACCESS_TOKEN\" -u service-account-1:FO6LgoMKA8TBDDHgSXZ5-iq1wKNwqdDkyeEGIl6gp0s \"http://localhost:8531/revoke\" -i\n
      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#consume-the-api-with-a-missing-or-invalid-access-token","title":"Consume the API with a missing or invalid access token","text":"
      curl -H \"Authorization: Bearer invalid\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak\"\n# www-authenticate: Bearer realm=\"a12n-server\"\n# x-ext-auth-reason: {\"a12n-server\":\"token is not active\",\"keycloak\":\"token is not active\"}\n
      "},{"location":"authorino/docs/user-guides/oauth2-token-introspection/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete secret/oauth2-token-introspection-credentials-keycloak\nkubectl delete secret/oauth2-token-introspection-credentials-a12n-server\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\nkubectl delete namespace a12n-server\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to setup Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and wants to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/observability/","title":"Observability","text":""},{"location":"authorino/docs/user-guides/observability/#metrics","title":"Metrics","text":"

      Authorino exports metrics at 2 endpoints:

      /metrics Metrics of the controller-runtime about reconciliation (caching) of AuthConfigs and API key Secrets /server-metrics Metrics of the external authorization gRPC and OIDC/Festival Wristband validation built-in HTTP servers

      The Authorino Operator creates a Kubernetes Service named <authorino-cr-name>-controller-metrics that exposes the endpoints on port 8080. The Authorino instance allows to modify the port number of the metrics endpoints, by setting the --metrics-addr command-line flag (default: :8080).

      Main metrics exported by endpoint1:

      Endpoint: /metrics Metric name Description\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0\u00a0 Labels Type controller_runtime_reconcile_total Total number of reconciliations per controller controller=authconfig|secret, result=success|error|requeue counter controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller controller=authconfig|secret counter controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller controller=authconfig|secret histogram controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller controller=authconfig|secret gauge workqueue_adds_total Total number of adds handled by workqueue name=authconfig|secret counter workqueue_depth Current depth of workqueue name=authconfig|secret gauge workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested name=authconfig|secret histogram workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running. name=authconfig|secret gauge workqueue_retries_total Total number of retries handled by workqueue name=authconfig|secret counter workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. name=authconfig|secret gauge workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes. 
name=authconfig|secret histogram rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host. code=200|404, method=GET|PUT|POST counter Endpoint: /server-metrics Metric name Description Labels Type auth_server_evaluator_total2 Total number of evaluations of individual authconfig rule performed by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_cancelled2 Number of evaluations of individual authconfig rule cancelled by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_ignored2 Number of evaluations of individual authconfig rule ignored by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_denied2 Number of denials from individual authconfig rule evaluated by the auth server. namespace, authconfig, evaluator_type, evaluator_name counter auth_server_evaluator_duration_seconds2 Response latency of individual authconfig rule evaluated by the auth server (in seconds). namespace, authconfig, evaluator_type, evaluator_name histogram auth_server_authconfig_total Total number of authconfigs enforced by the auth server, partitioned by authconfig. namespace, authconfig counter auth_server_authconfig_response_status Response status of authconfigs sent by the auth server, partitioned by authconfig. namespace, authconfig, status=OK|UNAUTHENTICATED,PERMISSION_DENIED counter auth_server_authconfig_duration_seconds Response latency of authconfig enforced by the auth server (in seconds). namespace, authconfig histogram auth_server_response_status Response status of authconfigs sent by the auth server. status=OK|UNAUTHENTICATED,PERMISSION_DENIED|NOT_FOUND counter grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure. 
grpc_code=OK|Aborted|Canceled|DeadlineExceeded|Internal|ResourceExhausted|Unknown, grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter grpc_server_handling_seconds Response latency (seconds) of gRPC that had been application-level handled by the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization histogram grpc_server_msg_received_total Total number of RPC stream messages received on the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter grpc_server_started_total Total number of RPCs started on the server. grpc_method=Check, grpc_service=envoy.service.auth.v3.Authorization counter http_server_handled_total Total number of calls completed on the raw HTTP authorization server, regardless of success or failure. http_code counter http_server_handling_seconds Response latency (seconds) of raw HTTP authorization request that had been application-level handled by the server. histogram oidc_server_requests_total Number of get requests received on the OIDC (Festival Wristband) server. namespace, authconfig, wristband, path=oidc-config|jwks counter oidc_server_response_status Status of HTTP response sent by the OIDC (Festival Wristband) server. status=200|404 counter

      1 Both endpoints export metrics about the Go runtime, such as number of goroutines (go_goroutines) and threads (go_threads), usage of CPU, memory and GC stats.

      2 Opt-in metrics: auth_server_evaluator_* metrics require authconfig.spec.(identity|metadata|authorization|response).metrics: true (default: false). This can be enforced for the entire instance (all AuthConfigs and evaluators), by setting the --deep-metrics-enabled command-line flag in the Authorino deployment.

      Example of metrics exported at the /metrics endpoint
      # HELP controller_runtime_active_workers Number of currently used workers per controller\n# TYPE controller_runtime_active_workers gauge\ncontroller_runtime_active_workers{controller=\"authconfig\"} 0\ncontroller_runtime_active_workers{controller=\"secret\"} 0\n# HELP controller_runtime_max_concurrent_reconciles Maximum number of concurrent reconciles per controller\n# TYPE controller_runtime_max_concurrent_reconciles gauge\ncontroller_runtime_max_concurrent_reconciles{controller=\"authconfig\"} 1\ncontroller_runtime_max_concurrent_reconciles{controller=\"secret\"} 1\n# HELP controller_runtime_reconcile_errors_total Total number of reconciliation errors per controller\n# TYPE controller_runtime_reconcile_errors_total counter\ncontroller_runtime_reconcile_errors_total{controller=\"authconfig\"} 12\ncontroller_runtime_reconcile_errors_total{controller=\"secret\"} 0\n# HELP controller_runtime_reconcile_time_seconds Length of time per reconciliation per controller\n# TYPE controller_runtime_reconcile_time_seconds histogram\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.005\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.01\"} 11\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.025\"} 17\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.05\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.1\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.15\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.2\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.25\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.3\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.35\"} 
18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.4\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.45\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.6\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.7\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.8\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"0.9\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.25\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"1.75\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"2\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"2.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"3\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"3.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"4\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"4.5\"} 18\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"5\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"6\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"7\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"8\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"9\"} 
19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"10\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"15\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"20\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"25\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"30\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"40\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"50\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"60\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"authconfig\",le=\"+Inf\"} 19\ncontroller_runtime_reconcile_time_seconds_sum{controller=\"authconfig\"} 5.171108321999999\ncontroller_runtime_reconcile_time_seconds_count{controller=\"authconfig\"} 19\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.005\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.01\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.025\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.05\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.1\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.15\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.2\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.3\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.35\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.4\"} 
1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.45\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.6\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.7\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.8\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"0.9\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"1.75\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"2\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"2.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"3\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"3.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"4\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"4.5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"5\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"6\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"7\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"8\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"9\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"10\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"15\"} 
1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"20\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"25\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"30\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"40\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"50\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"60\"} 1\ncontroller_runtime_reconcile_time_seconds_bucket{controller=\"secret\",le=\"+Inf\"} 1\ncontroller_runtime_reconcile_time_seconds_sum{controller=\"secret\"} 0.000138025\ncontroller_runtime_reconcile_time_seconds_count{controller=\"secret\"} 1\n# HELP controller_runtime_reconcile_total Total number of reconciliations per controller\n# TYPE controller_runtime_reconcile_total counter\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"error\"} 12\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"requeue\"} 0\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"requeue_after\"} 0\ncontroller_runtime_reconcile_total{controller=\"authconfig\",result=\"success\"} 7\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"error\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"requeue\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"requeue_after\"} 0\ncontroller_runtime_reconcile_total{controller=\"secret\",result=\"success\"} 1\n# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n# TYPE go_gc_cycles_automatic_gc_cycles_total counter\ngo_gc_cycles_automatic_gc_cycles_total 13\n# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n# TYPE go_gc_cycles_forced_gc_cycles_total counter\ngo_gc_cycles_forced_gc_cycles_total 0\n# HELP go_gc_cycles_total_gc_cycles_total 
Count of all completed GC cycles.\n# TYPE go_gc_cycles_total_gc_cycles_total counter\ngo_gc_cycles_total_gc_cycles_total 13\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.5971e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.69e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000140699\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000313162\ngo_gc_duration_seconds{quantile=\"1\"} 0.001692423\ngo_gc_duration_seconds_sum 0.003671076\ngo_gc_duration_seconds_count 13\n# HELP go_gc_heap_allocs_by_size_bytes_total Distribution of heap allocations by approximate size. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_by_size_bytes_total histogram\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 6357\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 45065\n[...]\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 128306\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"+Inf\"} 128327\ngo_gc_heap_allocs_by_size_bytes_total_sum 1.5021512e+07\ngo_gc_heap_allocs_by_size_bytes_total_count 128327\n# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application.\n# TYPE go_gc_heap_allocs_bytes_total counter\ngo_gc_heap_allocs_bytes_total 1.5021512e+07\n# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_objects_total counter\ngo_gc_heap_allocs_objects_total 128327\n# HELP go_gc_heap_frees_by_size_bytes_total Distribution of freed heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_by_size_bytes_total histogram\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 3885\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 33418\n[...]\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 96417\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"+Inf\"} 96425\ngo_gc_heap_frees_by_size_bytes_total_sum 9.880944e+06\ngo_gc_heap_frees_by_size_bytes_total_count 96425\n# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector.\n# TYPE go_gc_heap_frees_bytes_total counter\ngo_gc_heap_frees_bytes_total 9.880944e+06\n# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_objects_total counter\ngo_gc_heap_frees_objects_total 96425\n# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle.\n# TYPE go_gc_heap_goal_bytes gauge\ngo_gc_heap_goal_bytes 9.356624e+06\n# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory.\n# TYPE go_gc_heap_objects_objects gauge\ngo_gc_heap_objects_objects 31902\n# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. 
Each block is already accounted for in allocs-by-size and frees-by-size.\n# TYPE go_gc_heap_tiny_allocs_objects_total counter\ngo_gc_heap_tiny_allocs_objects_total 11750\n# HELP go_gc_pauses_seconds_total Distribution individual GC-related stop-the-world pause latencies.\n# TYPE go_gc_pauses_seconds_total histogram\ngo_gc_pauses_seconds_total_bucket{le=\"9.999999999999999e-10\"} 0\ngo_gc_pauses_seconds_total_bucket{le=\"1.9999999999999997e-09\"} 0\n[...]\ngo_gc_pauses_seconds_total_bucket{le=\"206708.18602188796\"} 26\ngo_gc_pauses_seconds_total_bucket{le=\"+Inf\"} 26\ngo_gc_pauses_seconds_total_sum 0.003151488\ngo_gc_pauses_seconds_total_count 26\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 80\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.18.7\"} 1\n# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory.\n# TYPE go_memory_classes_heap_free_bytes gauge\ngo_memory_classes_heap_free_bytes 589824\n# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector.\n# TYPE go_memory_classes_heap_objects_bytes gauge\ngo_memory_classes_heap_objects_bytes 5.140568e+06\n# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. 
This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory.\n# TYPE go_memory_classes_heap_released_bytes gauge\ngo_memory_classes_heap_released_bytes 4.005888e+06\n# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.\n# TYPE go_memory_classes_heap_stacks_bytes gauge\ngo_memory_classes_heap_stacks_bytes 786432\n# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects.\n# TYPE go_memory_classes_heap_unused_bytes gauge\ngo_memory_classes_heap_unused_bytes 2.0602e+06\n# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use.\n# TYPE go_memory_classes_metadata_mcache_free_bytes gauge\ngo_memory_classes_metadata_mcache_free_bytes 13984\n# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used.\n# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge\ngo_memory_classes_metadata_mcache_inuse_bytes 2400\n# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use.\n# TYPE go_memory_classes_metadata_mspan_free_bytes gauge\ngo_memory_classes_metadata_mspan_free_bytes 17104\n# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used.\n# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge\ngo_memory_classes_metadata_mspan_inuse_bytes 113968\n# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata.\n# TYPE go_memory_classes_metadata_other_bytes gauge\ngo_memory_classes_metadata_other_bytes 5.544408e+06\n# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system.\n# 
TYPE go_memory_classes_os_stacks_bytes gauge\ngo_memory_classes_os_stacks_bytes 0\n# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more.\n# TYPE go_memory_classes_other_bytes gauge\ngo_memory_classes_other_bytes 537777\n# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling.\n# TYPE go_memory_classes_profiling_buckets_bytes gauge\ngo_memory_classes_profiling_buckets_bytes 1.455487e+06\n# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes.\n# TYPE go_memory_classes_total_bytes gauge\ngo_memory_classes_total_bytes 2.026804e+07\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 5.140568e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.5021512e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.455487e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 108175\n# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.\n# TYPE go_memstats_gc_cpu_fraction gauge\ngo_memstats_gc_cpu_fraction 0\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 5.544408e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# 
TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 5.140568e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 4.595712e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 7.200768e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 31902\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 4.005888e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.179648e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6461572121033354e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 140077\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 2400\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 16384\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 113968\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes 
gauge\ngo_memstats_mspan_sys_bytes 131072\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 9.356624e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 537777\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 786432\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 786432\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.026804e+07\n# HELP go_sched_goroutines_goroutines Count of live goroutines.\n# TYPE go_sched_goroutines_goroutines gauge\ngo_sched_goroutines_goroutines 80\n# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.\n# TYPE go_sched_latencies_seconds histogram\ngo_sched_latencies_seconds_bucket{le=\"9.999999999999999e-10\"} 244\ngo_sched_latencies_seconds_bucket{le=\"1.9999999999999997e-09\"} 244\n[...]\ngo_sched_latencies_seconds_bucket{le=\"206708.18602188796\"} 2336\ngo_sched_latencies_seconds_bucket{le=\"+Inf\"} 2336\ngo_sched_latencies_seconds_sum 0.18509832400000004\ngo_sched_latencies_seconds_count 2336\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 8\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.84\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE 
process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.3728896e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.\n# TYPE rest_client_requests_total counter\nrest_client_requests_total{code=\"200\",host=\"10.96.0.1:443\",method=\"GET\"} 114\nrest_client_requests_total{code=\"200\",host=\"10.96.0.1:443\",method=\"PUT\"} 4\n# HELP workqueue_adds_total Total number of adds handled by workqueue\n# TYPE workqueue_adds_total counter\nworkqueue_adds_total{name=\"authconfig\"} 19\nworkqueue_adds_total{name=\"secret\"} 1\n# HELP workqueue_depth Current depth of workqueue\n# TYPE workqueue_depth gauge\nworkqueue_depth{name=\"authconfig\"} 0\nworkqueue_depth{name=\"secret\"} 0\n# HELP workqueue_longest_running_processor_seconds How many seconds has the longest running processor for workqueue been running.\n# TYPE workqueue_longest_running_processor_seconds gauge\nworkqueue_longest_running_processor_seconds{name=\"authconfig\"} 0\nworkqueue_longest_running_processor_seconds{name=\"secret\"} 0\n# HELP workqueue_queue_duration_seconds How long in seconds an item stays in workqueue before being requested\n# TYPE workqueue_queue_duration_seconds histogram\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-08\"} 
0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-07\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1e-06\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-06\"} 8\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-05\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.001\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.01\"} 17\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"0.1\"} 18\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"1\"} 18\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"10\"} 19\nworkqueue_queue_duration_seconds_bucket{name=\"authconfig\",le=\"+Inf\"} 19\nworkqueue_queue_duration_seconds_sum{name=\"authconfig\"} 4.969016371\nworkqueue_queue_duration_seconds_count{name=\"authconfig\"} 19\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-08\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-07\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1e-06\"} 0\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-06\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-05\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.001\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.01\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"0.1\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"1\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"10\"} 1\nworkqueue_queue_duration_seconds_bucket{name=\"secret\",le=\"+Inf\"} 1\nworkqueue_queue_duration_seconds_sum{name=\"secret\"} 4.67e-06\nworkqueue_queue_duration_seconds_count{name=\"secret\"} 1\n# HELP workqueue_retries_total Total number of retries handled by workqueue\n# TYPE workqueue_retries_total 
counter\nworkqueue_retries_total{name=\"authconfig\"} 12\nworkqueue_retries_total{name=\"secret\"} 0\n# HELP workqueue_unfinished_work_seconds How many seconds of work has been done that is in progress and hasn't been observed by work_duration. Large values indicate stuck threads. One can deduce the number of stuck threads by observing the rate at which this increases.\n# TYPE workqueue_unfinished_work_seconds gauge\nworkqueue_unfinished_work_seconds{name=\"authconfig\"} 0\nworkqueue_unfinished_work_seconds{name=\"secret\"} 0\n# HELP workqueue_work_duration_seconds How long in seconds processing an item from workqueue takes.\n# TYPE workqueue_work_duration_seconds histogram\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-08\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-07\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"9.999999999999999e-05\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.001\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.01\"} 11\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"0.1\"} 18\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"1\"} 18\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"10\"} 19\nworkqueue_work_duration_seconds_bucket{name=\"authconfig\",le=\"+Inf\"} 19\nworkqueue_work_duration_seconds_sum{name=\"authconfig\"} 5.171738079000001\nworkqueue_work_duration_seconds_count{name=\"authconfig\"} 19\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-08\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-07\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1e-06\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-06\"} 
0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"9.999999999999999e-05\"} 0\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.001\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.01\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"0.1\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"1\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"10\"} 1\nworkqueue_work_duration_seconds_bucket{name=\"secret\",le=\"+Inf\"} 1\nworkqueue_work_duration_seconds_sum{name=\"secret\"} 0.000150956\nworkqueue_work_duration_seconds_count{name=\"secret\"} 1\n
      Example of metrics exported at the /server-metrics endpoint
      # HELP auth_server_authconfig_duration_seconds Response latency of authconfig enforced by the auth server (in seconds).\n# TYPE auth_server_authconfig_duration_seconds histogram\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.001\"} 0\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.051000000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.101\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.15100000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.201\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.251\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.301\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.351\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.40099999999999997\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.45099999999999996\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.501\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.551\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.6010000000000001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.6510000000000001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.7010000000000002\"} 
1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.7510000000000002\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.8010000000000003\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.8510000000000003\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.9010000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"0.9510000000000004\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"edge-auth\",namespace=\"authorino\",le=\"+Inf\"} 1\nauth_server_authconfig_duration_seconds_sum{authconfig=\"edge-auth\",namespace=\"authorino\"} 0.001701795\nauth_server_authconfig_duration_seconds_count{authconfig=\"edge-auth\",namespace=\"authorino\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.001\"} 1\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.051000000000000004\"} 4\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.101\"} 4\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.15100000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.201\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.251\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.301\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.351\"} 
5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.40099999999999997\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.45099999999999996\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.501\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.551\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.6010000000000001\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.6510000000000001\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.7010000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.7510000000000002\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.8010000000000003\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.8510000000000003\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.9010000000000004\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"0.9510000000000004\"} 5\nauth_server_authconfig_duration_seconds_bucket{authconfig=\"talker-api-protection\",namespace=\"authorino\",le=\"+Inf\"} 5\nauth_server_authconfig_duration_seconds_sum{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 0.26967658299999997\nauth_server_authconfig_duration_seconds_count{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 5\n# 
HELP auth_server_authconfig_response_status Response status of authconfigs sent by the auth server, partitioned by authconfig.\n# TYPE auth_server_authconfig_response_status counter\nauth_server_authconfig_response_status{authconfig=\"edge-auth\",namespace=\"authorino\",status=\"OK\"} 1\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"OK\"} 2\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"PERMISSION_DENIED\"} 2\nauth_server_authconfig_response_status{authconfig=\"talker-api-protection\",namespace=\"authorino\",status=\"UNAUTHENTICATED\"} 1\n# HELP auth_server_authconfig_total Total number of authconfigs enforced by the auth server, partitioned by authconfig.\n# TYPE auth_server_authconfig_total counter\nauth_server_authconfig_total{authconfig=\"edge-auth\",namespace=\"authorino\"} 1\nauth_server_authconfig_total{authconfig=\"talker-api-protection\",namespace=\"authorino\"} 5\n# HELP auth_server_evaluator_duration_seconds Response latency of individual authconfig rule evaluated by the auth server (in seconds).\n# TYPE auth_server_evaluator_duration_seconds histogram\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.001\"} 0\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.051000000000000004\"} 3\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.101\"} 3\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.15100000000000002\"} 
4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.201\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.251\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.301\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.351\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.40099999999999997\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.45099999999999996\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.501\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.551\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.6010000000000001\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.6510000000000001\"} 
4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.7010000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.7510000000000002\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.8010000000000003\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.8510000000000003\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.9010000000000004\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"0.9510000000000004\"} 4\nauth_server_evaluator_duration_seconds_bucket{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\",le=\"+Inf\"} 4\nauth_server_evaluator_duration_seconds_sum{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 0.25800055\nauth_server_evaluator_duration_seconds_count{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 4\n# HELP auth_server_evaluator_total Total number of evaluations of individual authconfig rule performed by the auth server.\n# TYPE auth_server_evaluator_total 
counter\nauth_server_evaluator_total{authconfig=\"talker-api-protection\",evaluator_name=\"geo\",evaluator_type=\"METADATA_GENERIC_HTTP\",namespace=\"authorino\"} 4\n# HELP auth_server_response_status Response status of authconfigs sent by the auth server.\n# TYPE auth_server_response_status counter\nauth_server_response_status{status=\"NOT_FOUND\"} 1\nauth_server_response_status{status=\"OK\"} 3\nauth_server_response_status{status=\"PERMISSION_DENIED\"} 2\nauth_server_response_status{status=\"UNAUTHENTICATED\"} 1\n# HELP go_gc_cycles_automatic_gc_cycles_total Count of completed GC cycles generated by the Go runtime.\n# TYPE go_gc_cycles_automatic_gc_cycles_total counter\ngo_gc_cycles_automatic_gc_cycles_total 11\n# HELP go_gc_cycles_forced_gc_cycles_total Count of completed GC cycles forced by the application.\n# TYPE go_gc_cycles_forced_gc_cycles_total counter\ngo_gc_cycles_forced_gc_cycles_total 0\n# HELP go_gc_cycles_total_gc_cycles_total Count of all completed GC cycles.\n# TYPE go_gc_cycles_total_gc_cycles_total counter\ngo_gc_cycles_total_gc_cycles_total 11\n# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.\n# TYPE go_gc_duration_seconds summary\ngo_gc_duration_seconds{quantile=\"0\"} 4.5971e-05\ngo_gc_duration_seconds{quantile=\"0.25\"} 5.69e-05\ngo_gc_duration_seconds{quantile=\"0.5\"} 0.000158594\ngo_gc_duration_seconds{quantile=\"0.75\"} 0.000324091\ngo_gc_duration_seconds{quantile=\"1\"} 0.001692423\ngo_gc_duration_seconds_sum 0.003546711\ngo_gc_duration_seconds_count 11\n# HELP go_gc_heap_allocs_by_size_bytes_total Distribution of heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_by_size_bytes_total histogram\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 6261\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 42477\n[...]\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 122133\ngo_gc_heap_allocs_by_size_bytes_total_bucket{le=\"+Inf\"} 122154\ngo_gc_heap_allocs_by_size_bytes_total_sum 1.455944e+07\ngo_gc_heap_allocs_by_size_bytes_total_count 122154\n# HELP go_gc_heap_allocs_bytes_total Cumulative sum of memory allocated to the heap by the application.\n# TYPE go_gc_heap_allocs_bytes_total counter\ngo_gc_heap_allocs_bytes_total 1.455944e+07\n# HELP go_gc_heap_allocs_objects_total Cumulative count of heap allocations triggered by the application. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_allocs_objects_total counter\ngo_gc_heap_allocs_objects_total 122154\n# HELP go_gc_heap_frees_by_size_bytes_total Distribution of freed heap allocations by approximate size. 
Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_by_size_bytes_total histogram\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"8.999999999999998\"} 3789\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"16.999999999999996\"} 31067\n[...]\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"32768.99999999999\"} 91013\ngo_gc_heap_frees_by_size_bytes_total_bucket{le=\"+Inf\"} 91021\ngo_gc_heap_frees_by_size_bytes_total_sum 9.399936e+06\ngo_gc_heap_frees_by_size_bytes_total_count 91021\n# HELP go_gc_heap_frees_bytes_total Cumulative sum of heap memory freed by the garbage collector.\n# TYPE go_gc_heap_frees_bytes_total counter\ngo_gc_heap_frees_bytes_total 9.399936e+06\n# HELP go_gc_heap_frees_objects_total Cumulative count of heap allocations whose storage was freed by the garbage collector. Note that this does not include tiny objects as defined by /gc/heap/tiny/allocs:objects, only tiny blocks.\n# TYPE go_gc_heap_frees_objects_total counter\ngo_gc_heap_frees_objects_total 91021\n# HELP go_gc_heap_goal_bytes Heap size target for the end of the GC cycle.\n# TYPE go_gc_heap_goal_bytes gauge\ngo_gc_heap_goal_bytes 9.601744e+06\n# HELP go_gc_heap_objects_objects Number of objects, live or unswept, occupying heap memory.\n# TYPE go_gc_heap_objects_objects gauge\ngo_gc_heap_objects_objects 31133\n# HELP go_gc_heap_tiny_allocs_objects_total Count of small allocations that are packed together into blocks. These allocations are counted separately from other allocations because each individual allocation is not tracked by the runtime, only their block. 
Each block is already accounted for in allocs-by-size and frees-by-size.\n# TYPE go_gc_heap_tiny_allocs_objects_total counter\ngo_gc_heap_tiny_allocs_objects_total 9866\n# HELP go_gc_pauses_seconds_total Distribution individual GC-related stop-the-world pause latencies.\n# TYPE go_gc_pauses_seconds_total histogram\ngo_gc_pauses_seconds_total_bucket{le=\"9.999999999999999e-10\"} 0\ngo_gc_pauses_seconds_total_bucket{le=\"1.9999999999999997e-09\"} 0\n[...]\ngo_gc_pauses_seconds_total_bucket{le=\"206708.18602188796\"} 22\ngo_gc_pauses_seconds_total_bucket{le=\"+Inf\"} 22\ngo_gc_pauses_seconds_total_sum 0.0030393599999999996\ngo_gc_pauses_seconds_total_count 22\n# HELP go_goroutines Number of goroutines that currently exist.\n# TYPE go_goroutines gauge\ngo_goroutines 79\n# HELP go_info Information about the Go environment.\n# TYPE go_info gauge\ngo_info{version=\"go1.18.7\"} 1\n# HELP go_memory_classes_heap_free_bytes Memory that is completely free and eligible to be returned to the underlying system, but has not been. This metric is the runtime's estimate of free address space that is backed by physical memory.\n# TYPE go_memory_classes_heap_free_bytes gauge\ngo_memory_classes_heap_free_bytes 630784\n# HELP go_memory_classes_heap_objects_bytes Memory occupied by live objects and dead objects that have not yet been marked free by the garbage collector.\n# TYPE go_memory_classes_heap_objects_bytes gauge\ngo_memory_classes_heap_objects_bytes 5.159504e+06\n# HELP go_memory_classes_heap_released_bytes Memory that is completely free and has been returned to the underlying system. 
This metric is the runtime's estimate of free address space that is still mapped into the process, but is not backed by physical memory.\n# TYPE go_memory_classes_heap_released_bytes gauge\ngo_memory_classes_heap_released_bytes 3.858432e+06\n# HELP go_memory_classes_heap_stacks_bytes Memory allocated from the heap that is reserved for stack space, whether or not it is currently in-use.\n# TYPE go_memory_classes_heap_stacks_bytes gauge\ngo_memory_classes_heap_stacks_bytes 786432\n# HELP go_memory_classes_heap_unused_bytes Memory that is reserved for heap objects but is not currently used to hold heap objects.\n# TYPE go_memory_classes_heap_unused_bytes gauge\ngo_memory_classes_heap_unused_bytes 2.14776e+06\n# HELP go_memory_classes_metadata_mcache_free_bytes Memory that is reserved for runtime mcache structures, but not in-use.\n# TYPE go_memory_classes_metadata_mcache_free_bytes gauge\ngo_memory_classes_metadata_mcache_free_bytes 13984\n# HELP go_memory_classes_metadata_mcache_inuse_bytes Memory that is occupied by runtime mcache structures that are currently being used.\n# TYPE go_memory_classes_metadata_mcache_inuse_bytes gauge\ngo_memory_classes_metadata_mcache_inuse_bytes 2400\n# HELP go_memory_classes_metadata_mspan_free_bytes Memory that is reserved for runtime mspan structures, but not in-use.\n# TYPE go_memory_classes_metadata_mspan_free_bytes gauge\ngo_memory_classes_metadata_mspan_free_bytes 16696\n# HELP go_memory_classes_metadata_mspan_inuse_bytes Memory that is occupied by runtime mspan structures that are currently being used.\n# TYPE go_memory_classes_metadata_mspan_inuse_bytes gauge\ngo_memory_classes_metadata_mspan_inuse_bytes 114376\n# HELP go_memory_classes_metadata_other_bytes Memory that is reserved for or used to hold runtime metadata.\n# TYPE go_memory_classes_metadata_other_bytes gauge\ngo_memory_classes_metadata_other_bytes 5.544408e+06\n# HELP go_memory_classes_os_stacks_bytes Stack memory allocated by the underlying operating system.\n# 
TYPE go_memory_classes_os_stacks_bytes gauge\ngo_memory_classes_os_stacks_bytes 0\n# HELP go_memory_classes_other_bytes Memory used by execution trace buffers, structures for debugging the runtime, finalizer and profiler specials, and more.\n# TYPE go_memory_classes_other_bytes gauge\ngo_memory_classes_other_bytes 537777\n# HELP go_memory_classes_profiling_buckets_bytes Memory that is used by the stack trace hash map used for profiling.\n# TYPE go_memory_classes_profiling_buckets_bytes gauge\ngo_memory_classes_profiling_buckets_bytes 1.455487e+06\n# HELP go_memory_classes_total_bytes All memory mapped by the Go runtime into the current process as read-write. Note that this does not include memory mapped by code called via cgo or via the syscall package. Sum of all metrics in /memory/classes.\n# TYPE go_memory_classes_total_bytes gauge\ngo_memory_classes_total_bytes 2.026804e+07\n# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.\n# TYPE go_memstats_alloc_bytes gauge\ngo_memstats_alloc_bytes 5.159504e+06\n# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.\n# TYPE go_memstats_alloc_bytes_total counter\ngo_memstats_alloc_bytes_total 1.455944e+07\n# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.\n# TYPE go_memstats_buck_hash_sys_bytes gauge\ngo_memstats_buck_hash_sys_bytes 1.455487e+06\n# HELP go_memstats_frees_total Total number of frees.\n# TYPE go_memstats_frees_total counter\ngo_memstats_frees_total 100887\n# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.\n# TYPE go_memstats_gc_cpu_fraction gauge\ngo_memstats_gc_cpu_fraction 0\n# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.\n# TYPE go_memstats_gc_sys_bytes gauge\ngo_memstats_gc_sys_bytes 5.544408e+06\n# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.\n# 
TYPE go_memstats_heap_alloc_bytes gauge\ngo_memstats_heap_alloc_bytes 5.159504e+06\n# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.\n# TYPE go_memstats_heap_idle_bytes gauge\ngo_memstats_heap_idle_bytes 4.489216e+06\n# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.\n# TYPE go_memstats_heap_inuse_bytes gauge\ngo_memstats_heap_inuse_bytes 7.307264e+06\n# HELP go_memstats_heap_objects Number of allocated objects.\n# TYPE go_memstats_heap_objects gauge\ngo_memstats_heap_objects 31133\n# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.\n# TYPE go_memstats_heap_released_bytes gauge\ngo_memstats_heap_released_bytes 3.858432e+06\n# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.\n# TYPE go_memstats_heap_sys_bytes gauge\ngo_memstats_heap_sys_bytes 1.179648e+07\n# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.\n# TYPE go_memstats_last_gc_time_seconds gauge\ngo_memstats_last_gc_time_seconds 1.6461569717723043e+09\n# HELP go_memstats_lookups_total Total number of pointer lookups.\n# TYPE go_memstats_lookups_total counter\ngo_memstats_lookups_total 0\n# HELP go_memstats_mallocs_total Total number of mallocs.\n# TYPE go_memstats_mallocs_total counter\ngo_memstats_mallocs_total 132020\n# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.\n# TYPE go_memstats_mcache_inuse_bytes gauge\ngo_memstats_mcache_inuse_bytes 2400\n# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.\n# TYPE go_memstats_mcache_sys_bytes gauge\ngo_memstats_mcache_sys_bytes 16384\n# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.\n# TYPE go_memstats_mspan_inuse_bytes gauge\ngo_memstats_mspan_inuse_bytes 114376\n# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.\n# TYPE go_memstats_mspan_sys_bytes 
gauge\ngo_memstats_mspan_sys_bytes 131072\n# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.\n# TYPE go_memstats_next_gc_bytes gauge\ngo_memstats_next_gc_bytes 9.601744e+06\n# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.\n# TYPE go_memstats_other_sys_bytes gauge\ngo_memstats_other_sys_bytes 537777\n# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.\n# TYPE go_memstats_stack_inuse_bytes gauge\ngo_memstats_stack_inuse_bytes 786432\n# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.\n# TYPE go_memstats_stack_sys_bytes gauge\ngo_memstats_stack_sys_bytes 786432\n# HELP go_memstats_sys_bytes Number of bytes obtained from system.\n# TYPE go_memstats_sys_bytes gauge\ngo_memstats_sys_bytes 2.026804e+07\n# HELP go_sched_goroutines_goroutines Count of live goroutines.\n# TYPE go_sched_goroutines_goroutines gauge\ngo_sched_goroutines_goroutines 79\n# HELP go_sched_latencies_seconds Distribution of the time goroutines have spent in the scheduler in a runnable state before actually running.\n# TYPE go_sched_latencies_seconds histogram\ngo_sched_latencies_seconds_bucket{le=\"9.999999999999999e-10\"} 225\ngo_sched_latencies_seconds_bucket{le=\"1.9999999999999997e-09\"} 225\n[...]\ngo_sched_latencies_seconds_bucket{le=\"206708.18602188796\"} 1916\ngo_sched_latencies_seconds_bucket{le=\"+Inf\"} 1916\ngo_sched_latencies_seconds_sum 0.18081453600000003\ngo_sched_latencies_seconds_count 1916\n# HELP go_threads Number of OS threads created.\n# TYPE go_threads gauge\ngo_threads 8\n# HELP grpc_server_handled_total Total number of RPCs completed on the server, regardless of success or failure.\n# TYPE grpc_server_handled_total counter\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Aborted\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"AlreadyExists\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Canceled\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DataLoss\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"DeadlineExceeded\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"FailedPrecondition\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Internal\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"InvalidArgument\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"NotFound\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 
0\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OK\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"OutOfRange\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"PermissionDenied\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"ResourceExhausted\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 
0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unauthenticated\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unavailable\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unimplemented\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_handled_total{grpc_code=\"Unknown\",grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_handling_seconds Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.\n# TYPE grpc_server_handling_seconds histogram\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.005\"} 
3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.01\"} 3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.025\"} 3\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.05\"} 6\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.1\"} 6\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.25\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"0.5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"1\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"2.5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"5\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"10\"} 7\ngrpc_server_handling_seconds_bucket{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\",le=\"+Inf\"} 7\ngrpc_server_handling_seconds_sum{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 0.277605516\ngrpc_server_handling_seconds_count{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\n# HELP grpc_server_msg_received_total Total number of RPC stream messages received on the server.\n# TYPE 
grpc_server_msg_received_total counter\ngrpc_server_msg_received_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_msg_received_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_msg_received_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_msg_sent_total Total number of gRPC stream messages sent by the server.\n# TYPE grpc_server_msg_sent_total counter\ngrpc_server_msg_sent_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_msg_sent_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_msg_sent_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP grpc_server_started_total Total number of RPCs started on the server.\n# TYPE grpc_server_started_total counter\ngrpc_server_started_total{grpc_method=\"Check\",grpc_service=\"envoy.service.auth.v3.Authorization\",grpc_type=\"unary\"} 7\ngrpc_server_started_total{grpc_method=\"Check\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"unary\"} 0\ngrpc_server_started_total{grpc_method=\"Watch\",grpc_service=\"grpc.health.v1.Health\",grpc_type=\"server_stream\"} 0\n# HELP oidc_server_requests_total Number of get requests received on the OIDC (Festival Wristband) server.\n# TYPE oidc_server_requests_total counter\noidc_server_requests_total{authconfig=\"edge-auth\",namespace=\"authorino\",path=\"/.well-known/openid-configuration\",wristband=\"wristband\"} 1\noidc_server_requests_total{authconfig=\"edge-auth\",namespace=\"authorino\",path=\"/.well-known/openid-connect/certs\",wristband=\"wristband\"} 1\n# HELP oidc_server_response_status Status of HTTP response sent by the OIDC (Festival Wristband) server.\n# TYPE oidc_server_response_status 
counter\noidc_server_response_status{status=\"200\"} 2\n# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.\n# TYPE process_cpu_seconds_total counter\nprocess_cpu_seconds_total 1.42\n# HELP process_max_fds Maximum number of open file descriptors.\n# TYPE process_max_fds gauge\nprocess_max_fds 1.048576e+06\n# HELP process_open_fds Number of open file descriptors.\n# TYPE process_open_fds gauge\nprocess_open_fds 14\n# HELP process_resident_memory_bytes Resident memory size in bytes.\n# TYPE process_resident_memory_bytes gauge\nprocess_resident_memory_bytes 4.370432e+07\n# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.\n# TYPE process_start_time_seconds gauge\nprocess_start_time_seconds 1.64615612779e+09\n# HELP process_virtual_memory_bytes Virtual memory size in bytes.\n# TYPE process_virtual_memory_bytes gauge\nprocess_virtual_memory_bytes 7.65362176e+08\n# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.\n# TYPE process_virtual_memory_max_bytes gauge\nprocess_virtual_memory_max_bytes 1.8446744073709552e+19\n# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.\n# TYPE promhttp_metric_handler_requests_in_flight gauge\npromhttp_metric_handler_requests_in_flight 1\n# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.\n# TYPE promhttp_metric_handler_requests_total counter\npromhttp_metric_handler_requests_total{code=\"200\"} 1\npromhttp_metric_handler_requests_total{code=\"500\"} 0\npromhttp_metric_handler_requests_total{code=\"503\"} 0\n
      "},{"location":"authorino/docs/user-guides/observability/#readiness-check","title":"Readiness check","text":"

      Authorino exposes two main endpoints for health and readiness check of the AuthConfig controller:

      • /healthz: Health probe (ping) \u2013 reports \"ok\" if the controller is healthy.
      • /readyz: Readiness probe \u2013 reports \"ok\" if the controller is ready to reconcile AuthConfig-related events.

      In general, the endpoints return either 200 (\"ok\", i.e. all checks have passed) or 500 (when one or more checks failed).

      The default binding network address is :8081, which can be changed by setting the command-line flag --health-probe-addr.

      The following additional subpath is available and its corresponding check can be aggregated into the response from the main readiness probe:

      • /readyz/authconfigs: Aggregated readiness status of the AuthConfigs \u2013 reports \"ok\" if all AuthConfigs watched by the reconciler have been marked as ready.
      Important!The AuthConfig readiness check within the scope of the aggregated readiness probe endpoint is deactivated by default \u2013 i.e. this check is an opt-in check. Sending a request to the /readyz endpoint without explicitly opting-in for the AuthConfigs check, by using the include parameter, will result in a response message that disregards the actual status of the watched AuthConfigs, possibly an \"ok\" message. To read the aggregated status of the watched AuthConfigs, either use the specific endpoint /readyz/authconfigs or opt-in for the check in the aggregated endpoint by sending a request to /readyz?include=authconfigs

      Apart from include to add the aggregated status of the AuthConfigs, the following additional query string parameters are available:

      • verbose=true|false - provides more verbose response messages;
      • exclude=(check name) \u2013 to exclude a particular readiness check (for future usage).
      "},{"location":"authorino/docs/user-guides/observability/#logging","title":"Logging","text":"

      Authorino provides either structured log messages (\"production\" mode) or log messages output to stdout in a more human-friendly format (\"development\" mode), as well as different levels of logging.

      "},{"location":"authorino/docs/user-guides/observability/#log-levels-and-log-modes","title":"Log levels and log modes","text":"

      Authorino outputs 3 levels of log messages: (from lowest to highest level)

      1. debug
      2. info (default)
      3. error

      info logging is restricted to high-level information of the gRPC and HTTP authorization services, limiting messages to incoming request and respective outgoing response logs, with reduced details about the corresponding objects (request payload and authorization result), and without any further detailed logs of the steps in between, except for errors.

      Only debug logging will include processing details of each Auth Pipeline, such as intermediary requests to validate identities with external auth servers, requests to external sources of auth metadata or authorization policies.

      To configure the desired log level, set the spec.logLevel field of the Authorino custom resource (or --log-level command-line flag in the Authorino deployment), to one of the supported values listed above. Default log level is info.

      Apart from log level, Authorino can output messages to the logs in 2 different formats:

      • production (default): each line is a parseable JSON object with properties {\"level\":string, \"ts\":int, \"msg\":string, \"logger\":string, extra values...}
      • development: more human-readable outputs, extra stack traces and logging info, plus extra values output as JSON, in the format: <timestamp-iso-8601>\\t<log-level>\\t<logger>\\t<message>\\t{extra-values-as-json}

      To configure the desired log mode, set the spec.logMode field of the Authorino custom resource (or --log-mode command-line flag in the Authorino deployment), to one of the supported values listed above. Default log mode is production.

      Example of Authorino custom resource with log level debug and log mode production:

      apiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  logLevel: debug\n  logMode: production\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\n
      "},{"location":"authorino/docs/user-guides/observability/#sensitive-data-output-to-the-logs","title":"Sensitive data output to the logs","text":"

      Authorino will never output HTTP headers and query string parameters to info log messages, as such values usually include sensitive data (e.g. access tokens, API keys and Authorino Festival Wristbands). However, debug log messages may include such sensitive information and those are not redacted.

      Therefore, DO NOT USE debug LOG LEVEL IN PRODUCTION! Instead, use either info or error.

      "},{"location":"authorino/docs/user-guides/observability/#log-messages-printed-by-authorino","title":"Log messages printed by Authorino","text":"

      Some log messages printed by Authorino and corresponding extra values included:

      logger level message extra values authorino info \"setting instance base logger\" min level=info\\|debug, mode=production\\|development authorino info \"booting up authorino\" version authorino debug \"setting up with options\" auth-config-label-selector, deep-metrics-enabled, enable-leader-election, evaluator-cache-size, ext-auth-grpc-port, ext-auth-http-port, health-probe-addr, log-level, log-mode, max-http-request-body-size, metrics-addr, oidc-http-port, oidc-tls-cert, oidc-tls-cert-key, secret-label-selector, timeout, tls-cert, tls-cert-key, watch-namespace authorino info \"attempting to acquire leader lease <namespace>/cb88a58a.authorino.kuadrant.io...\\n\" authorino info \"successfully acquired lease <namespace>/cb88a58a.authorino.kuadrant.io\\n\" authorino info \"disabling grpc auth service\" authorino info \"starting grpc auth service\" port, tls authorino error \"failed to obtain port for the grpc auth service\" authorino error \"failed to load tls cert for the grpc auth\" authorino error \"failed to start grpc auth service\" authorino info \"disabling http auth service\" authorino info \"starting http auth service\" port, tls authorino error \"failed to obtain port for the http auth service\" authorino error \"failed to start http auth service\" authorino info \"disabling http oidc service\" authorino info \"starting http oidc service\" port, tls authorino error \"failed to obtain port for the http oidc service\" authorino error \"failed to start http oidc service\" authorino info \"starting manager\" authorino error \"unable to start manager\" authorino error \"unable to create controller\" controller=authconfig\\|secret\\|authconfigstatusupdate authorino error \"problem running manager\" authorino info \"starting status update manager\" authorino error \"unable to start status update manager\" authorino error \"problem running status update manager\" authorino.controller-runtime.metrics info \"metrics server is starting to listen\" addr 
authorino.controller-runtime.manager info \"starting metrics server\" path authorino.controller-runtime.manager.events debug \"Normal\" object={kind=ConfigMap, apiVersion=v1}, reason=LeaderElection, message=\"authorino-controller-manager-* became leader\" authorino.controller-runtime.manager.events debug \"Normal\" object={kind=Lease, apiVersion=coordination.k8s.io/v1}, reason=LeaderElection, message=\"authorino-controller-manager-* became leader\" authorino.controller-runtime.manager.controller.authconfig info \"resource reconciled\" authconfig authorino.controller-runtime.manager.controller.authconfig info \"host already taken\" authconfig, host authorino.controller-runtime.manager.controller.authconfig.statusupdater debug \"resource status did not change\" authconfig authorino.controller-runtime.manager.controller.authconfig.statusupdater debug \"resource status changed\" authconfig, authconfig/status authorino.controller-runtime.manager.controller.authconfig.statusupdater error \"failed to update the resource\" authconfig authorino.controller-runtime.manager.controller.authconfig.statusupdater info \"resource status updated\" authconfig authorino.controller-runtime.manager.controller.secret info \"resource reconciled\" authorino.controller-runtime.manager.controller.secret info \"could not reconcile authconfigs using api key authentication\" authorino.service.oidc info \"request received\" request id, url, realm, config, path authorino.service.oidc info \"response sent\" request id authorino.service.oidc error \"failed to serve oidc request\" authorino.service.auth info \"incoming authorization request\" request id, object authorino.service.auth debug \"incoming authorization request\" request id, object authorino.service.auth info \"outgoing authorization response\" request id, authorized, response, object authorino.service.auth debug \"outgoing authorization response\" request id, authorized, response, object 
authorino.service.auth error \"failed to create dynamic metadata\" request id, object authorino.service.auth.authpipeline debug \"skipping config\" request id, config, reason authorino.service.auth.authpipeline.identity debug \"identity validated\" request id, config, object authorino.service.auth.authpipeline.identity debug \"cannot validate identity\" request id, config, reason authorino.service.auth.authpipeline.identity error \"failed to extend identity object\" request id, config, object authorino.service.auth.authpipeline.identity.oidc error \"failed to discovery openid connect configuration\" endpoint authorino.service.auth.authpipeline.identity.oidc debug \"auto-refresh of openid connect configuration disabled\" endpoint, reason authorino.service.auth.authpipeline.identity.oidc debug \"openid connect configuration updated\" endpoint authorino.service.auth.authpipeline.identity.oauth2 debug \"sending token introspection request\" request id, url, data authorino.service.auth.authpipeline.identity.kubernetesauth debug \"calling kubernetes token review api\" request id, tokenreview authorino.service.auth.authpipeline.identity.apikey error \"Something went wrong fetching the authorized credentials\" authorino.service.auth.authpipeline.metadata debug \"fetched auth metadata\" request id, config, object authorino.service.auth.authpipeline.metadata debug \"cannot fetch metadata\" request id, config, reason authorino.service.auth.authpipeline.metadata.http debug \"sending request\" request id, method, url, headers authorino.service.auth.authpipeline.metadata.userinfo debug \"fetching user info\" request id, endpoint authorino.service.auth.authpipeline.metadata.uma debug \"requesting pat\" request id, url, data, headers authorino.service.auth.authpipeline.metadata.uma debug \"querying resources by uri\" request id, url authorino.service.auth.authpipeline.metadata.uma debug \"getting resource data\" request id, url authorino.service.auth.authpipeline.authorization 
debug \"evaluating for input\" request id, input authorino.service.auth.authpipeline.authorization debug \"access granted\" request id, config, object authorino.service.auth.authpipeline.authorization debug \"access denied\" request id, config, reason authorino.service.auth.authpipeline.authorization.opa error \"invalid response from policy evaluation\" policy authorino.service.auth.authpipeline.authorization.opa error \"failed to precompile policy\" policy authorino.service.auth.authpipeline.authorization.opa error \"failed to download policy from external registry\" policy, endpoint authorino.service.auth.authpipeline.authorization.opa error \"failed to refresh policy from external registry\" policy, endpoint authorino.service.auth.authpipeline.authorization.opa debug \"external policy unchanged\" policy, endpoint authorino.service.auth.authpipeline.authorization.opa debug \"auto-refresh of external policy disabled\" policy, endpoint, reason authorino.service.auth.authpipeline.authorization.opa info \"policy updated from external registry\" policy, endpoint authorino.service.auth.authpipeline.authorization.kubernetesauthz debug \"calling kubernetes subject access review api\" request id, subjectaccessreview authorino.service.auth.authpipeline.response debug \"dynamic response built\" request id, config, object authorino.service.auth.authpipeline.response debug \"cannot build dynamic response\" request id, config, reason authorino.service.auth.http debug \"bad request\" request id authorino.service.auth.http debug \"not found\" request id authorino.service.auth.http debug \"request body too large\" request id authorino.service.auth.http debug \"service unavailable\" request id"},{"location":"authorino/docs/user-guides/observability/#examples","title":"Examples","text":"

      The examples below are all with --log-level=debug and --log-mode=production.

      Booting up the service
      {\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"setting instance base logger\",\"min level\":\"info\",\"mode\":\"production\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"build information\",\"version\":\"v1beta3\",\"commit\":\"ae2dc8150af2e6cdb35957ba7305c4c2a76d6149\",\"dirty\":\"false\",\"cmd\":\"server\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting http auth service\",\"port\":5001,\"tls\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting grpc auth service\",\"port\":50051,\"tls\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting http oidc service\",\"port\":8083,\"tls\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting reconciliation manager\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting server\",\"kind\":\"health probe\",\"addr\":\"[::]:8081\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino.controller-runtime.metrics\",\"msg\":\"Starting metrics server\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino.controller-runtime.metrics\",\"msg\":\"Serving metrics server\",\"bindAddress\":\":8080\",\"secure\":false}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting EventSource\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\",\"source\":\"kind source: *v1beta3.AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting EventSource\",\"controller\":\"secret\",\"controllerGroup\":\"\",\"controllerKind\":\"Secret\",\"source\":\"kind source: 
*v1.Secret\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting Controller\",\"controller\":\"secret\",\"controllerGroup\":\"\",\"controllerKind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting Controller\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"starting status update manager\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting EventSource\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\",\"source\":\"kind source: *v1beta3.AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting Controller\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting workers\",\"controller\":\"secret\",\"controllerGroup\":\"\",\"controllerKind\":\"Secret\",\"worker count\":1}\n{\"level\":\"info\",\"ts\":\"2024-10-07T10:31:02+01:00\",\"logger\":\"authorino\",\"msg\":\"Starting workers\",\"controller\":\"authconfig\",\"controllerGroup\":\"authorino.kuadrant.io\",\"controllerKind\":\"AuthConfig\",\"worker count\":1}\n
      Reconciling an AuthConfig and 2 related API key secrets
      {\"level\":\"debug\",\"ts\":1669221208.7473805,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsNotLinked\",\"message\":\"No hosts linked to the resource\"},{\"type\":\"Ready\",\"status\":\"False\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Unknown\"}],\"summary\":{\"ready\":false,\"hostsReady\":[],\"numHostsReady\":\"0/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7496614,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7532616,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535005,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7535596,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-1\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7536132,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource 
reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221208.753772,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.753835,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status changed\",\"authconfig\":\"default/talker-api-protection\",\"authconfig/status\":{\"conditions\":[{\"type\":\"Available\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"HostsLinked\"},{\"type\":\"Ready\",\"status\":\"True\",\"lastTransitionTime\":\"2022-11-23T16:33:28Z\",\"reason\":\"Reconciled\"}],\"summary\":{\"ready\":true,\"hostsReady\":[\"talker-api.127.0.0.1.nip.io\"],\"numHostsReady\":\"1/1\",\"numIdentitySources\":1,\"numMetadataSources\":0,\"numAuthorizationPolicies\":0,\"numResponseItems\":0,\"festivalWristbandEnabled\":false}}}\n{\"level\":\"info\",\"ts\":1669221208.7571108,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"info\",\"ts\":1669221208.7573664,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.757429,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not change\",\"authconfig\":\"default/talker-api-protection\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586699,\"logger\":\"authorino.controller.secret\",\"msg\":\"adding k8s secret to the index\",\"reconciler group\":\"\",\"reconciler 
kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\",\"authconfig\":\"default/talker-api-protection\",\"config\":\"friends\"}\n{\"level\":\"debug\",\"ts\":1669221208.7586884,\"logger\":\"authorino.controller.secret.apikey\",\"msg\":\"api key added\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\",\"name\":\"api-key-2\",\"namespace\":\"default\"}\n{\"level\":\"info\",\"ts\":1669221208.7586913,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n{\"level\":\"debug\",\"ts\":1669221208.7597604,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status did not change\",\"authconfig\":\"default/talker-api-protection\"}\n
      Enforcing an AuthConfig with authentication based on Kubernetes tokens:

      • identity: k8s-auth, oidc, oauth2, apikey
      • metadata: http, oidc userinfo
      • authorization: opa, k8s-authz
      • response: wristband
      {\"level\":\"info\",\"ts\":1634830460.1486168,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1491194,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"8157480586935853928\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830460.150506,\"logger\":\"authorino.service.auth.authpipeline.identity.kubernetesauth\",\"msg\":\"calling kubernetes token review api\",\"request 
id\":\"8157480586935853928\",\"tokenreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"token\":\"eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"audiences\":[\"talker-api\"]},\"status\":{\"user\":{}}}}\n{\"level\":\"debug\",\"ts\":1634830460.1509938,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830460.1517606,\"logger\":\"authorino.service.auth.authpipeline.identity.oauth2\",\"msg\":\"sending token introspection request\",\"request 
id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"data\":\"token=eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA&token_type_hint=requesting_party_token\"}\n{\"level\":\"debug\",\"ts\":1634830460.1620777,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"}}\n{\"level\":\"debug\",\"ts\":1634830460.1622565,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting 
pat\",\"request id\":\"8157480586935853928\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.1670353,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"8157480586935853928\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830460.169326,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830460.1753876,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"}}\n{\"level\":\"debug\",\"ts\":1634830460.2331996,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830460.2495668,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request id\":\"8157480586935853928\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830460.2927864,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830460.2930083,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"8157480586935853928\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":53144}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830460,\"nanos\":147259000},\"http\":{\"id\":\"8157480586935853928\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"Bearer 
eyJhbGciOiJSUzI1NiIsImtpZCI6IkRsVWJZMENyVy1sZ0tFMVRMd19pcTFUWGtTYUl6T0hyWks0VHhKYnpEZUUifQ.eyJhdWQiOlsidGFsa2VyLWFwaSJdLCJleHAiOjE2MzQ4MzEwNTEsImlhdCI6MTYzNDgzMDQ1MSwiaXNzIjoiaHR0cHM6Ly9rdWJlcm5ldGVzLmRlZmF1bHQuc3ZjLmNsdXN0ZXIubG9jYWwiLCJrdWJlcm5ldGVzLmlvIjp7Im5hbWVzcGFjZSI6ImF1dGhvcmlubyIsInNlcnZpY2VhY2NvdW50Ijp7Im5hbWUiOiJhcGktY29uc3VtZXItMSIsInVpZCI6ImI0MGY1MzFjLWVjYWItNGYzMS1hNDk2LTJlYmM3MmFkZDEyMSJ9fSwibmJmIjoxNjM0ODMwNDUxLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6YXV0aG9yaW5vOmFwaS1jb25zdW1lci0xIn0.PaP0vqdl5DPfErr84KfVhPdlsGAPgsw0NkDaA9rne1zXjzcO7KPPbXhFwZC-oIjSGG1HfRMSoQeCXbQz24PSATmX8l1T52a9IFeXgP7sQmXZIDbiPfTm3X09kIIlfPKHhK_f-jQwRIpMRqNgLntlZ-xXX3P1fOBBUYR8obTPAQ6NDDaLHxw2SAmHFTQWjM_DInPDemXX0mEm7nCPKifsNxHaQH4wx4CD3LCLGbCI9FHNf2Crid8mmGJXf4wzcH1VuKkpUlsmnlUgTG2bfT2lbhSF2lBmrrhTJyYk6_aA09DwL4Bf4kvG-JtCq0Bkd_XynViIsOtOnAhgmdSPkfr-oA\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"4c5d5c97-e15b-46a3-877a-d8188e09e08f\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"aud\":[\"talker-api\"],\"exp\":1634831051,\"iat\":1634830451,\"iss\":\"https://kubernetes.default.svc.cluster.local\",\"kubernetes.io\":{\"namespace\":\"authorino\",\"serviceaccount\":{\"name\":\"api-consumer-1\",\"uid\":\"b40f531c-ecab-4f31-a496-2ebc72add121\"}},\"nbf\":1634830451,\"sub\":\"system:serviceaccount:authorino:api-consumer-1\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"1aa6ac66-3179-4351-b1a7-7f6a761d5b61\"},\"uma-resource-regis
try\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830460.2955465,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request id\":\"8157480586935853928\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"system:serviceaccount:authorino:api-consumer-1\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830460.2986183,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3044975,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"8157480586935853928\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830460.3052874,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request 
id\":\"8157480586935853928\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3NjAsImlhdCI6MTYzNDgzMDQ2MCwiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI4NDliMDk0ZDA4MzU0ZjM0MjA4ZGI3MjBmYWZmODlmNmM3NmYyOGY3MTcxOWI4NTQ3ZDk5NWNlNzAwMjU2ZGY4In0.Jn-VB5Q_0EX1ed1ji4KvhO4DlMqZeIl5H0qlukbTyYkp-Pgb4SnPGSbYWp5_uvG8xllsFAA5nuyBIXeba-dbkw\"}\n{\"level\":\"info\",\"ts\":1634830460.3054585,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830460.305476,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"8157480586935853928\",\"authorized\":true,\"response\":\"OK\"}\n
      Enforcing an AuthConfig with authentication based on API keys

      • identity: k8s-auth, oidc, oauth2, apikey
      • metadata: http, oidc userinfo
      • authorization: opa, k8s-authz
      • response: wristband
      {\"level\":\"info\",\"ts\":1634830413.2425854,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830413.2426975,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"7199257136822741594\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830413.2428744,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request 
id\":\"7199257136822741594\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830413.2434332,\"logger\":\"authorino.service.auth.authpipeline\",\"msg\":\"skipping config\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"context canceled\"}\n{\"level\":\"debug\",\"ts\":1634830413.2479305,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"identity validated\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"object\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_ke
y\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"}}\n{\"level\":\"debug\",\"ts\":1634830413.248768,\"logger\":\"authorino.service.auth.authpipeline.metadata.http\",\"msg\":\"sending request\",\"request id\":\"7199257136822741594\",\"method\":\"GET\",\"url\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path=/hello\",\"headers\":{\"Content-Type\":[\"text/plain\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.2496722,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"cannot fetch metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"oidc-userinfo\",\"UserInfo\":{\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"}},\"UMA\":null,\"GenericHTTP\":null},\"reason\":\"Missing identity for OIDC issuer http://keycloak:8080/realms/kuadrant. 
Skipping related UserInfo metadata.\"}\n{\"level\":\"debug\",\"ts\":1634830413.2497928,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"requesting pat\",\"request id\":\"7199257136822741594\",\"url\":\"http://talker-api:523b92b6-625d-4e1e-a313-77e7a8ae4e88@keycloak:8080/realms/kuadrant/protocol/openid-connect/token\",\"data\":\"grant_type=client_credentials\",\"headers\":{\"Content-Type\":[\"application/x-www-form-urlencoded\"]}}\n{\"level\":\"debug\",\"ts\":1634830413.258932,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"http-metadata\",\"UserInfo\":null,\"UMA\":null,\"GenericHTTP\":{\"Endpoint\":\"http://talker-api.default.svc.cluster.local:3000/metadata?encoding=text/plain&original_path={context.request.http.path}\",\"Method\":\"GET\",\"Parameters\":[],\"ContentType\":\"application/x-www-form-urlencoded\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"object\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Agent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"}}\n{\"level\":\"debug\",\"ts\":1634830413.2945344,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"querying resources by uri\",\"request id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set?uri=/hello\"}\n{\"level\":\"debug\",\"ts\":1634830413.3123596,\"logger\":\"authorino.service.auth.authpipeline.metadata.uma\",\"msg\":\"getting resource data\",\"request 
id\":\"7199257136822741594\",\"url\":\"http://keycloak:8080/realms/kuadrant/authz/protection/resource_set/e20d194c-274c-4845-8c02-0ca413c9bf18\"}\n{\"level\":\"debug\",\"ts\":1634830413.3340268,\"logger\":\"authorino.service.auth.authpipeline.metadata\",\"msg\":\"fetched auth metadata\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"uma-resource-registry\",\"UserInfo\":null,\"UMA\":{\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"GenericHTTP\":null},\"object\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}\n{\"level\":\"debug\",\"ts\":1634830413.3367748,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"evaluating for input\",\"request id\":\"7199257136822741594\",\"input\":{\"context\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52702}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830413,\"nanos\":240094000},\"http\":{\"id\":\"7199257136822741594\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY 
ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"d38f5e66-bd72-4733-95d1-3179315cdd60\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}},\"auth\":{\"identity\":{\"apiVersion\":\"v1\",\"data\":{\"api_key\":\"bmR5QnpyZVV6RjR6cURRc3FTUE1Ia1JocmlFT3RjUng=\"},\"kind\":\"Secret\",\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"v1\\\",\\\"kind\\\":\\\"Secret\\\",\\\"metadata\\\":{\\\"annotations\\\":{\\\"userid\\\":\\\"john\\\"},\\\"labels\\\":{\\\"audience\\\":\\\"talker-api\\\",\\\"authorino.kuadrant.io/managed-by\\\":\\\"authorino\\\"},\\\"name\\\":\\\"api-key-1\\\",\\\"namespace\\\":\\\"authorino\\\"},\\\"stringData\\\":{\\\"api_key\\\":\\\"ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\\\"},\\\"type\\\":\\\"Opaque\\\"}\\n\",\"userid\":\"john\"},\"creationTimestamp\":\"2021-10-21T14:45:54Z\",\"labels\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"},\"managedFields\":[{\"apiVersion\":\"v1\",\"fieldsType\":\"FieldsV1\",\"fieldsV1\":{\"f:data\":{\".\":{},\"f:api_key\":{}},\"f:metadata\":{\"f:annotations\":{\".\":{},\"f:kubectl.kubernetes.io/last-applied-configuration\":{},\"f:userid\":{}},\"f:labels\":{\".\":{},\"f:audience\":{},\"f:authorino.kuadrant.io/managed-by\":{}}},\"f:type\":{}},\"manager\":\"kubectl-client-side-apply\",\"operation\":\"Update\",\"time\":\"2021-10-21T14:45:54Z\"}],\"name\":\"api-key-1\",\"namespace\":\"authorino\",\"resourceVersion\":\"8979\",\"uid\":\"c369852a-7e1a-43bd-94ca-e2b3f617052e\"},\"sub\":\"john\",\"type\":\"Opaque\"},\"metadata\":{\"http-metadata\":{\"body\":\"\",\"headers\":{\"Accept-Encoding\":\"gzip\",\"Content-Type\":\"text/plain\",\"Host\":\"talker-api.default.svc.cluster.local:3000\",\"User-Ag
ent\":\"Go-http-client/1.1\",\"Version\":\"HTTP/1.1\"},\"method\":\"GET\",\"path\":\"/metadata\",\"query_string\":\"encoding=text/plain&original_path=/hello\",\"uuid\":\"97529f8c-587b-4121-a4db-cd90c63871fd\"},\"uma-resource-registry\":[{\"_id\":\"e20d194c-274c-4845-8c02-0ca413c9bf18\",\"attributes\":{},\"displayName\":\"hello\",\"name\":\"hello\",\"owner\":{\"id\":\"57a645a5-fb67-438b-8be5-dfb971666dbc\"},\"ownerManagedAccess\":false,\"resource_scopes\":[],\"uris\":[\"/hi\",\"/hello\"]}]}}}}\n{\"level\":\"debug\",\"ts\":1634830413.339894,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"my-policy\",\"OPA\":{\"Rego\":\"fail := input.context.request.http.headers[\\\"x-ext-auth-mock\\\"] == \\\"FAIL\\\"\\nallow { not fail }\\n\",\"OPAExternalSource\":{\"Endpoint\":\"\",\"SharedSecret\":\"\",\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"JSON\":null,\"KubernetesAuthz\":null},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3444238,\"logger\":\"authorino.service.auth.authpipeline.authorization.kubernetesauthz\",\"msg\":\"calling kubernetes subject access review api\",\"request id\":\"7199257136822741594\",\"subjectaccessreview\":{\"metadata\":{\"creationTimestamp\":null},\"spec\":{\"nonResourceAttributes\":{\"path\":\"/hello\",\"verb\":\"get\"},\"user\":\"john\"},\"status\":{\"allowed\":false}}}\n{\"level\":\"debug\",\"ts\":1634830413.3547812,\"logger\":\"authorino.service.auth.authpipeline.authorization\",\"msg\":\"access granted\",\"request 
id\":\"7199257136822741594\",\"config\":{\"Name\":\"kubernetes-rbac\",\"OPA\":null,\"JSON\":null,\"KubernetesAuthz\":{\"Conditions\":[],\"User\":{\"Static\":\"\",\"Pattern\":\"auth.identity.user.username\"},\"Groups\":null,\"ResourceAttributes\":null}},\"object\":true}\n{\"level\":\"debug\",\"ts\":1634830413.3558292,\"logger\":\"authorino.service.auth.authpipeline.response\",\"msg\":\"dynamic response built\",\"request id\":\"7199257136822741594\",\"config\":{\"Name\":\"wristband\",\"Wrapper\":\"httpHeader\",\"WrapperKey\":\"x-ext-auth-wristband\",\"Wristband\":{\"Issuer\":\"https://authorino-oidc.default.svc:8083/default/talker-api-protection/wristband\",\"CustomClaims\":[],\"TokenDuration\":300,\"SigningKeys\":[{\"use\":\"sig\",\"kty\":\"EC\",\"kid\":\"wristband-signing-key\",\"crv\":\"P-256\",\"alg\":\"ES256\",\"x\":\"TJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZw\",\"y\":\"SSg8rKBsJ3J1LxyLtt0oFvhHvZcUpmRoTuHk3UHisTA\",\"d\":\"Me-5_zWBWVYajSGZcZMCcD8dXEa4fy85zv_yN7BxW-o\"}]},\"DynamicJSON\":null},\"object\":\"eyJhbGciOiJFUzI1NiIsImtpZCI6IndyaXN0YmFuZC1zaWduaW5nLWtleSIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2MzQ4MzA3MTMsImlhdCI6MTYzNDgzMDQxMywiaXNzIjoiaHR0cHM6Ly9hdXRob3Jpbm8tb2lkYy5hdXRob3Jpbm8uc3ZjOjgwODMvYXV0aG9yaW5vL3RhbGtlci1hcGktcHJvdGVjdGlvbi93cmlzdGJhbmQiLCJzdWIiOiI5NjhiZjViZjk3MDM3NWRiNjE0ZDFhMDgzZTg2NTBhYTVhMGVhMzAyOTdiYmJjMTBlNWVlMWZmYTkxYTYwZmY4In0.7G440sWgi2TIaxrGJf5KWR9UOFpNTjwVYeaJXFLzsLhVNICoMLbYzBAEo4M3ym1jipxxTVeE7anm4qDDc7cnVQ\"}\n{\"level\":\"info\",\"ts\":1634830413.3569078,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n{\"level\":\"debug\",\"ts\":1634830413.3569596,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"7199257136822741594\",\"authorized\":true,\"response\":\"OK\"}\n
      Enforcing an AuthConfig with authentication based on API keys (invalid API key)

      • identity: k8s-auth, oidc, oauth2, apikey
      • metadata: http, oidc userinfo
      • authorization: opa, k8s-authz
      • response: wristband
      {\"level\":\"info\",\"ts\":1634830373.2066543,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\"}}}}\n{\"level\":\"debug\",\"ts\":1634830373.2068064,\"logger\":\"authorino.service.auth\",\"msg\":\"incoming authorization request\",\"request id\":\"12947265773116138711\",\"object\":{\"source\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":52288}}}}},\"destination\":{\"address\":{\"Address\":{\"SocketAddress\":{\"address\":\"127.0.0.1\",\"PortSpecifier\":{\"PortValue\":8000}}}}},\"request\":{\"time\":{\"seconds\":1634830373,\"nanos\":198329000},\"http\":{\"id\":\"12947265773116138711\",\"method\":\"GET\",\"headers\":{\":authority\":\"talker-api\",\":method\":\"GET\",\":path\":\"/hello\",\":scheme\":\"http\",\"accept\":\"*/*\",\"authorization\":\"APIKEY invalid\",\"user-agent\":\"curl/7.65.3\",\"x-envoy-internal\":\"true\",\"x-forwarded-for\":\"10.244.0.11\",\"x-forwarded-proto\":\"http\",\"x-request-id\":\"9e391846-afe4-489a-8716-23a2e1c1aa77\"},\"path\":\"/hello\",\"host\":\"talker-api\",\"scheme\":\"http\",\"protocol\":\"HTTP/1.1\"}},\"context_extensions\":{\"virtual_host\":\"local_service\"},\"metadata_context\":{}}}\n{\"level\":\"debug\",\"ts\":1634830373.2070816,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request 
id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-opaque\",\"ExtendedProperties\":[],\"OAuth2\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"TokenIntrospectionUrl\":\"http://keycloak:8080/realms/kuadrant/protocol/openid-connect/token/introspect\",\"TokenTypeHint\":\"requesting_party_token\",\"ClientID\":\"talker-api\",\"ClientSecret\":\"523b92b6-625d-4e1e-a313-77e7a8ae4e88\"},\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830373.207225,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"api-keys\",\"ExtendedProperties\":[{\"Name\":\"sub\",\"Value\":{\"Static\":null,\"Pattern\":\"auth.identity.metadata.annotations.userid\"}}],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":{\"AuthCredentials\":{\"KeySelector\":\"APIKEY\",\"In\":\"authorization_header\"},\"Name\":\"api-keys\",\"LabelSelectors\":{\"audience\":\"talker-api\",\"authorino.kuadrant.io/managed-by\":\"authorino\"}},\"KubernetesAuth\":null},\"reason\":\"the API Key provided is invalid\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072473,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request id\":\"12947265773116138711\",\"config\":{\"Name\":\"k8s-service-accounts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":null,\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"}}},\"reason\":\"credential not found\"}\n{\"level\":\"debug\",\"ts\":1634830373.2072592,\"logger\":\"authorino.service.auth.authpipeline.identity\",\"msg\":\"cannot validate identity\",\"request 
id\":\"12947265773116138711\",\"config\":{\"Name\":\"keycloak-jwts\",\"ExtendedProperties\":[],\"OAuth2\":null,\"OIDC\":{\"AuthCredentials\":{\"KeySelector\":\"Bearer\",\"In\":\"authorization_header\"},\"Endpoint\":\"http://keycloak:8080/realms/kuadrant\"},\"MTLS\":null,\"HMAC\":null,\"APIKey\":null,\"KubernetesAuth\":null},\"reason\":\"credential not found\"}\n{\"level\":\"info\",\"ts\":1634830373.2073083,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\"}}\n{\"level\":\"debug\",\"ts\":1634830373.2073889,\"logger\":\"authorino.service.auth\",\"msg\":\"outgoing authorization response\",\"request id\":\"12947265773116138711\",\"authorized\":false,\"response\":\"UNAUTHENTICATED\",\"object\":{\"code\":16,\"status\":302,\"message\":\"Redirecting to login\",\"headers\":[{\"Location\":\"https://my-app.io/login\"}]}}\n
      Deleting an AuthConfig and 2 related API key secrets
      {\"level\":\"info\",\"ts\":1669221361.5032296,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-1\"}\n{\"level\":\"info\",\"ts\":1669221361.5057878,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"default/api-key-2\"}\n
      Shutting down the service
      {\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136683,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0135982,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for non leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0136883,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for leader election runnables\"}\n{\"level\":\"info\",\"ts\":1669221635.0137057,\"logger\":\"authorino.controller.secret\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.013724,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.01375,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.013752,\"logger\":\"authorino.controller.secret\",\"msg\":\"All workers finished\",\"reconciler group\":\"\",\"reconciler kind\":\"Secret\"}\n{\"level\":\"info\",\"ts\":1669221635.0137632,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.013751,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"Shutdown signal received, waiting for all workers to finish\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137684,\"logger\":\"authorino.controller.authconfig\",\"msg\":\"All workers finished\",\"reconciler group\":\"authorino.kuadrant.io\",\"reconciler 
kind\":\"AuthConfig\"}\n{\"level\":\"info\",\"ts\":1669221635.0137722,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for caches\"}\n{\"level\":\"info\",\"ts\":1669221635.0138857,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0138955,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n{\"level\":\"info\",\"ts\":1669221635.0138893,\"logger\":\"authorino\",\"msg\":\"Stopping and waiting for webhooks\"}\n{\"level\":\"info\",\"ts\":1669221635.0139785,\"logger\":\"authorino\",\"msg\":\"Wait completed, proceeding to shutdown the manager\"}\n
      "},{"location":"authorino/docs/user-guides/observability/#tracing","title":"Tracing","text":""},{"location":"authorino/docs/user-guides/observability/#request-id","title":"Request ID","text":"

      Processes related to the authorization request are identified and linked together by a request ID. The request ID can be:

      • generated outside Authorino and passed in the authorization request \u2013 this is essentially the case of requests via GRPC authorization interface initiated by the Envoy;
      • generated by Authorino \u2013 requests via Raw HTTP Authorization interface.
      "},{"location":"authorino/docs/user-guides/observability/#propagation","title":"Propagation","text":"

      Authorino propagates trace identifiers compatible with the W3C Trace Context format https://www.w3.org/TR/trace-context/ and user-defined baggage data in the W3C Baggage format https://www.w3.org/TR/baggage.

      "},{"location":"authorino/docs/user-guides/observability/#log-tracing","title":"Log tracing","text":"

      Most log messages associated with an authorization request include the request id value. This value can be used to match incoming request and corresponding outgoing response log messages, including at deep level when more fine-grained log details are enabled (debug level).

      "},{"location":"authorino/docs/user-guides/observability/#opentelemetry-integration","title":"OpenTelemetry integration","text":"

      Integration with an OpenTelemetry collector can be enabled by supplying the --tracing-service-endpoint command-line flag (e.g. authorino server --tracing-service-endpoint=http://jaeger:14268/api/traces).

      The additional --tracing-service-tag command-line flag allows specifying fixed agent-level key-value tags for the trace signals emitted by Authorino (e.g. authorino server --tracing-service-endpoint=... --tracing-service-tag=key1=value1 --tracing-service-tag=key2=value2).

      Traces related to authorization requests are additionally tagged with the authorino.request_id attribute.

      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/","title":"User guide: OpenID Connect Discovery and authentication with JWTs","text":"

      Validate JSON Web Tokens (JWT) issued and signed by an OpenID Connect server; leverage OpenID Connect Discovery to automatically fetch JSON Web Key Sets (JWKS).

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 JWT verification

      Authorino validates JSON Web Tokens (JWT) issued by an OpenID Connect server that implements OpenID Connect Discovery. Authorino fetches the OpenID Connect configuration and JSON Web Key Set (JWKS) from the issuer endpoint, and verifies the JSON Web Signature (JWS) and time validity of the token.

      Important! Authorino does not implement OAuth2 grants nor OIDC authentication flows. As a common recommendation of good practice, obtaining and refreshing access tokens is for clients to negotiate directly with the auth servers and token issuers. Authorino will only validate those tokens using the parameters provided by the trusted issuer authorities.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n
      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#consume-the-api","title":"\u277c Consume the API","text":"

      With a valid access token:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      With missing or invalid access token:

      curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: Bearer realm=\"keycloak-kuadrant-realm\"\n# x-ext-auth-reason: credential not found\n
      "},{"location":"authorino/docs/user-guides/oidc-jwt-authentication/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/oidc-rbac/","title":"User guide: OpenID Connect (OIDC) and Role-Based Access Control (RBAC) with Authorino and Keycloak","text":"

      Combine OpenID Connect (OIDC) authentication and Role-Based Access Control (RBAC) authorization rules leveraging Keycloak and Authorino working together.

      In this user guide, you will learn via example how to implement a simple Role-Based Access Control (RBAC) system to protect endpoints of an API, with roles assigned to users of an Identity Provider (Keycloak) and carried within the access tokens as JSON Web Token (JWT) claims. Users authenticate with the IdP via OAuth2/OIDC flow and get their access tokens verified and validated by Authorino on every request. Moreover, Authorino reads the role bindings of the user and enforces the proper RBAC rules based upon the context.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 JWT verification
      • Authorization \u2192 Pattern-matching authorization

      Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/oidc-rbac/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/oidc-rbac/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      In this example, the Keycloak realm defines a few users and 2 realm roles: 'member' and 'admin'. When users authenticate to the Keycloak server by any of the supported OAuth2/OIDC flows, Keycloak adds to the access token JWT a claim \"realm_access\": { \"roles\": array } that holds the list of roles assigned to the user. Authorino will verify the JWT on requests to the API and read from that claim to enforce the following RBAC rules:

      Path Method Role /resources[/*] GET / POST / PUT member /resources/{id} DELETE admin /admin[/*] * admin Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.

      Apply the AuthConfig:

      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n\n  patterns:\n    \"member-role\":\n\n    - selector: auth.identity.realm_access.roles\n      operator: incl\n      value: member\n    \"admin-role\":\n    - selector: auth.identity.realm_access.roles\n      operator: incl\n      value: admin\n\n  authorization:\n    # RBAC rule: 'member' role required for requests to /resources[/*]\n    \"rbac-resources-api\":\n      when:\n\n      - selector: context.request.http.path\n        operator: matches\n        value: ^/resources(/.*)?$\n      patternMatching:\n        patterns:\n        - patternRef: member-role\n\n    # RBAC rule: 'admin' role required for DELETE requests to /resources/{id}\n    \"rbac-delete-resource\":\n      when:\n\n      - selector: context.request.http.path\n        operator: matches\n        value: ^/resources/\\d+$\n      - selector: context.request.http.method\n        operator: eq\n        value: DELETE\n      patternMatching:\n        patterns:\n        - patternRef: admin-role\n\n    # RBAC rule: 'admin' role required for requests to /admin[/*]\n    \"rbac-admin-api\":\n      when:\n\n      - selector: context.request.http.path\n        operator: matches\n        value: ^/admin(/.*)?$\n      patternMatching:\n        patterns:\n        - patternRef: admin-role\nEOF\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api","title":"\u277b Obtain an access token and consume the API","text":""},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"

      Obtain an access token with the Keycloak server for John:

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for the user John, who is assigned to the 'member' role:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      As John, send a GET request to /resources:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n

      As John, send a DELETE request to /resources/123:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 403 Forbidden\n

      As John, send a GET request to /admin/settings:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 403 Forbidden\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#obtain-an-access-token-and-consume-the-api-as-jane-memberadmin","title":"Obtain an access token and consume the API as Jane (member/admin)","text":"

      Obtain an access token from within the cluster for the user Jane, who is assigned to the 'member' and 'admin' roles:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      As Jane, send a GET request to /resources:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/resources -i\n# HTTP/1.1 200 OK\n

      As Jane, send a DELETE request to /resources/123:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/resources/123 -i\n# HTTP/1.1 200 OK\n

      As Jane, send a GET request to /admin/settings:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/admin/settings -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/oidc-rbac/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/oidc-user-info/","title":"User guide: OpenID Connect UserInfo","text":"

      Fetch user info for OpenID Connect ID tokens at request time, for extra metadata for your policies and online verification of token validity.

      Authorino capabilities featured in this guide:
      • External auth metadata \u2192 OIDC UserInfo
      • Identity verification & authentication \u2192 JWT verification
      • Authorization \u2192 Pattern-matching authorization

      Apart from possibly complementing information of the JWT, fetching OpenID Connect UserInfo at request time can be particularly useful for remotely checking the state of the session, as opposed to only verifying the JWT/JWS offline. Implementation requires an OpenID Connect issuer (spec.identity.oidc) configured in the same AuthConfig.

      Check out as well the user guide about OpenID Connect Discovery and authentication with JWTs.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/oidc-user-info/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/oidc-user-info/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/oidc-user-info/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/oidc-user-info/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/oidc-user-info/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"userinfo\":\n      userInfo:\n        identitySource: keycloak-kuadrant-realm\n  authorization:\n    \"active-tokens-only\":\n      patternMatching:\n        patterns:\n        - selector: \"auth.metadata.userinfo.email\" # user email expected from the userinfo instead of the jwt\n          operator: neq\n          value: \"\"\nEOF\n
      "},{"location":"authorino/docs/user-guides/oidc-user-info/#obtain-an-access-token-with-the-keycloak-server","title":"\u277b Obtain an access token with the Keycloak server","text":"

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster:

      export $(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r '\"ACCESS_TOKEN=\"+.access_token,\"REFRESH_TOKEN=\"+.refresh_token')\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      "},{"location":"authorino/docs/user-guides/oidc-user-info/#consume-the-api","title":"\u277c Consume the API","text":"

      With a valid access token:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      Revoke the access token and try to consume the API again:

      kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/logout -H \"Content-Type: application/x-www-form-urlencoded\" -d \"refresh_token=$REFRESH_TOKEN\" -d 'token_type_hint=requesting_party_token' -u demo:\n
      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
      "},{"location":"authorino/docs/user-guides/oidc-user-info/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/opa-authorization/","title":"User guide: Open Policy Agent (OPA) Rego policies","text":"

      Leverage the power of Open Policy Agent (OPA) policies, evaluated against Authorino's Authorization JSON in a built-in runtime compiled together with Authorino; pre-cache policies defined in Rego language inline or fetched from an external policy registry.

      Authorino capabilities featured in this guide:
      • Authorization \u2192 Open Policy Agent (OPA) Rego policies
      • Identity verification & authentication \u2192 API key

      Authorino supports Open Policy Agent policies, either inline defined in Rego language as part of the AuthConfig or fetched from an external endpoint, such as an OPA Policy Registry.

      Authorino's built-in OPA module precompiles the policies at reconciliation time and caches them for fast evaluation at request time, where they receive the Authorization JSON as input.

      Check out as well the user guide about Authentication with API keys.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/opa-authorization/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/opa-authorization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      In this example, we will use OPA to implement a read-only policy for requests coming from outside a trusted network (IP range 192.168.1.0/24).

      The implementation relies on the X-Forwarded-For HTTP header to read the client's IP address.5

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n  authorization:\n    \"read-only-outside\":\n      opa:\n        rego: |\n          ips := split(input.context.request.http.headers[\"x-forwarded-for\"], \",\")\n          trusted_network { net.cidr_contains(\"192.168.1.1/24\", ips[0]) }\n\n          allow { trusted_network }\n          allow { not trusted_network; input.context.request.http.method == \"GET\" }\nEOF\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#create-the-api-key","title":"\u277b Create the API key","text":"
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#consume-the-api","title":"\u277c Consume the API","text":"

      Inside the trusted network:

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 192.168.1.10' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      Outside the trusted network:

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n
      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' \\\n     -H 'X-Forwarded-For: 123.45.6.78' \\\n     -X POST \\\n     http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n# x-ext-auth-reason: Unauthorized\n
      "},{"location":"authorino/docs/user-guides/opa-authorization/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      5. You can also set use_remote_address: true in the Envoy route configuration, so the proxy will append its IP address instead of running in transparent mode. This setting will also ensure the real remote address of the client connection is passed in the x-envoy-external-address HTTP header, which can be used to simplify the read-only policy in a remote environment.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/passing-credentials/","title":"User guide: Passing credentials (Authorization header, cookie headers and others)","text":"

      Customize where credentials are supplied in the request by each trusted source of identity.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 Auth credentials
      • Identity verification & authentication \u2192 API key

      Authentication tokens can be supplied in the Authorization header, in a custom header, cookie or query string parameter.

      Check out as well the user guide about Authentication with API keys.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/passing-credentials/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred to in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/passing-credentials/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      In this example, member users can authenticate supplying the API key in any of 4 different ways:

      • HTTP header Authorization: APIKEY <api-key>
      • HTTP header X-API-Key: <api-key>
      • Query string parameter api_key=<api-key>
      • Cookie Cookie: APIKEY=<api-key>;

      admin API keys are only accepted in the (default) HTTP header Authorization: Bearer <api-key>.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"members-authorization-header\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY # instead of the default prefix 'Bearer'\n    \"members-custom-header\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        customHeader:\n          name: X-API-Key\n    \"members-query-string-param\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        queryString:\n          name: api_key\n    \"members-cookie\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: members\n      credentials:\n        cookie:\n          name: APIKEY\n    \"admins\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: admins\nEOF\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#create-the-api-keys","title":"\u277b Create the API keys","text":"

      For a member user:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: members\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

      For an admin user:

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-2\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: admins\nstringData:\n  api_key: 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#consume-the-api","title":"\u277c Consume the API","text":"

      As member user, passing the API key in the Authorization header:

      curl -H 'Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      As member user, passing the API key in the custom X-API-Key header:

      curl -H 'X-API-Key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      As member user, passing the API key in the query string parameter api_key:

      curl \"http://talker-api.127.0.0.1.nip.io:8000/hello?api_key=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\"\n# HTTP/1.1 200 OK\n

      As member user, passing the API key in the APIKEY cookie header:

      curl -H 'Cookie: APIKEY=ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx;foo=bar' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      As admin user:

      curl -H 'Authorization: Bearer 7BNaTmYGItSzXiwQLNHu82+x52p1XHgY' http://talker-api.127.0.0.1.nip.io:8000/hello\n# HTTP/1.1 200 OK\n

      Missing the API key:

      curl http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 401 Unauthorized\n# www-authenticate: APIKEY realm=\"members-authorization-header\"\n# www-authenticate: X-API-Key realm=\"members-custom-header\"\n# www-authenticate: api_key realm=\"members-query-string-param\"\n# www-authenticate: APIKEY realm=\"members-cookie\"\n# www-authenticate: Bearer realm=\"admins\"\n# x-ext-auth-reason: {\"admins\":\"credential not found\",\"members-authorization-header\":\"credential not found\",\"members-cookie\":\"credential not found\",\"members-custom-header\":\"credential not found\",\"members-query-string-param\":\"credential not found\"}\n
      "},{"location":"authorino/docs/user-guides/passing-credentials/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-1\nkubectl delete secret/api-key-2\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/","title":"User guide: Resource-level authorization with User-Managed Access (UMA) resource registry","text":"

      Fetch resource metadata relevant for your authorization policies from Keycloak authorization clients, using User-Managed Access (UMA) protocol.

      Authorino capabilities featured in this guide:
      • External auth metadata \u2192 User-Managed Access (UMA) resource registry
      • Identity verification & authentication \u2192 JWT verification
      • Authorization \u2192 Open Policy Agent (OPA) Rego policies

      Check out as well the user guides about OpenID Connect Discovery and authentication with JWTs and Open Policy Agent (OPA) Rego policies.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced:

      This example of resource-level authorization leverages part of Keycloak's User-Managed Access (UMA) support. Authorino will fetch resource attributes stored in a Keycloak resource server client.

      The Keycloak server also provides the identities. The sub claim of the Keycloak-issued ID tokens must match the owner of the requested resource, identified by the URI of the request.

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.

      Create a required secret that will be used by Authorino to initiate the authentication with the UMA registry.

      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: talker-api-uma-credentials\nstringData:\n  clientID: talker-api\n  clientSecret: 523b92b6-625d-4e1e-a313-77e7a8ae4e88\ntype: Opaque\nEOF\n

      Create the config:

      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"resource-data\":\n      uma:\n        endpoint: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n        credentialsRef:\n          name: talker-api-uma-credentials\n  authorization:\n    \"owned-resources\":\n      opa:\n        rego: |\n          COLLECTIONS = [\"greetings\"]\n\n          http_request = input.context.request.http\n          http_method = http_request.method\n          requested_path_sections = split(trim_left(trim_right(http_request.path, \"/\"), \"/\"), \"/\")\n\n          get { http_method == \"GET\" }\n          post { http_method == \"POST\" }\n          put { http_method == \"PUT\" }\n          delete { http_method == \"DELETE\" }\n\n          valid_collection { COLLECTIONS[_] == requested_path_sections[0] }\n\n          collection_endpoint {\n            valid_collection\n            count(requested_path_sections) == 1\n          }\n\n          resource_endpoint {\n            valid_collection\n            some resource_id\n            requested_path_sections[1] = resource_id\n          }\n\n          identity_owns_the_resource {\n            identity := input.auth.identity\n            resource_attrs := object.get(input.auth.metadata, \"resource-data\", [])[0]\n            resource_owner := object.get(object.get(resource_attrs, \"owner\", {}), \"id\", \"\")\n            resource_owner == identity.sub\n          }\n\n          allow { get;    collection_endpoint }\n          allow { post;   collection_endpoint }\n          allow { get;    resource_endpoint; identity_owns_the_resource }\n          allow { put;    resource_endpoint; identity_owns_the_resource }\n          allow { 
delete; resource_endpoint; identity_owns_the_resource }\nEOF\n

      The OPA policy owned-resources above enforces that all users can send GET and POST requests to /greetings, while only resource owners can send GET, PUT and DELETE requests to /greetings/{resource-id}.

      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-access-tokens-with-the-keycloak-server-and-consume-the-api","title":"\u277b Obtain access tokens with the Keycloak server and consume the API","text":""},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-john-and-consume-the-api","title":"Obtain an access token as John and consume the API","text":"

      Obtain an access token for user John (owner of the resource /greetings/1 in the UMA registry):

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      As John, send requests to the API:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-jane-and-consume-the-api","title":"Obtain an access token as Jane and consume the API","text":"

      Obtain an access token for user Jane (owner of the resource /greetings/2 in the UMA registry):

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      As Jane, send requests to the API:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#obtain-an-access-token-as-peter-and-consume-the-api","title":"Obtain an access token as Peter and consume the API","text":"

      Obtain an access token for user Peter (does not own any resource in the UMA registry):

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=peter' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      As Peter, send requests to the API:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings\n# HTTP/1.1 200 OK\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/1 -i\n# HTTP/1.1 403 Forbidden\n\ncurl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/greetings/2 -i\n# HTTP/1.1 403 Forbidden\n
      "},{"location":"authorino/docs/user-guides/resource-level-authorization-uma/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authconfig/talker-api-protection\nkubectl delete secret/talker-api-uma-credentials\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/sharding/","title":"User guide: Reducing the operational space","text":"

      By default, Authorino will watch events related to all AuthConfig custom resources in the reconciliation space (namespace or entire cluster). Instances can, however, be configured to watch only a subset of the resources, thus enabling use cases such as:

      • to reduce noise and lower memory usage inside instances meant for restricted scope (e.g. Authorino deployed as a dedicated sidecar to protect only one host);
      • sharding auth config data across multiple instances;
      • multiple environments (e.g. staging, production) inside of a same cluster/namespace;
      • providing managed instances of Authorino that all watch CRs cluster-wide, yet dedicated to organizations allowed to create and operate their own AuthConfigs across multiple namespaces.
      \u26a0\ufe0f Important: This feature may not be available to users of Authorino via Kuadrant. Authorino capabilities featured in this guide:
      • Sharding
      • Identity verification & authentication \u2192 API key

      Check out as well the user guide about Authentication with API keys.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/sharding/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      "},{"location":"authorino/docs/user-guides/sharding/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/sharding/#deploy-instances-of-authorino","title":"\u2777 Deploy instances of Authorino","text":"

      Deploy an instance of Authorino dedicated to AuthConfigs and API key Secrets labeled with authorino/environment=staging:

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino-staging\nspec:\n  clusterWide: true\n  authConfigLabelSelectors: authorino/environment=staging\n  secretLabelSelectors: authorino/environment=staging\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n

      Deploy an instance of Authorino dedicated to AuthConfigs and API key Secrets labeled with authorino/environment=production, and NOT labeled disabled:

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino-production\nspec:\n  clusterWide: true\n  authConfigLabelSelectors: authorino/environment=production,!disabled\n  secretLabelSelectors: authorino/environment=production,!disabled\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n

      The commands above will both request instances of Authorino that watch for AuthConfig resources cluster-wide1, with TLS disabled2.

      "},{"location":"authorino/docs/user-guides/sharding/#create-a-namespace-for-user-resources","title":"\u2778 Create a namespace for user resources","text":"
      kubectl create namespace myapp\n
      "},{"location":"authorino/docs/user-guides/sharding/#create-authconfigs-and-api-key-secrets-for-both-instances","title":"\u2779 Create AuthConfigs and API key Secrets for both instances","text":""},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-staging","title":"Create resources for authorino-staging","text":"

      Create an AuthConfig:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: auth-config-1\n  labels:\n    authorino/environment: staging\nspec:\n  hosts:\n\n  - my-host.staging.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino/api-key: \"true\"\n            authorino/environment: staging\nEOF\n

      Create an API key Secret:

      kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino/api-key: \"true\"\n    authorino/environment: staging\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n

      Verify in the logs that only the authorino-staging instance adds the resources to the index:

      kubectl logs $(kubectl get pods -l authorino-resource=authorino-staging -o name)\n# {\"level\":\"info\",\"ts\":1638382989.8327162,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638382989.837424,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-1\"}\n# {\"level\":\"info\",\"ts\":1638383144.9486837,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-1\"}\n
      "},{"location":"authorino/docs/user-guides/sharding/#create-resources-for-authorino-production","title":"Create resources for authorino-production","text":"

      Create an AuthConfig:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: auth-config-2\n  labels:\n    authorino/environment: production\nspec:\n  hosts:\n\n  - my-host.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels:\n            authorino/api-key: \"true\"\n            authorino/environment: production\nEOF\n

      Create an API key Secret:

      kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-2\n  labels:\n    authorino/api-key: \"true\"\n    authorino/environment: production\nstringData:\n  api_key: MUWdeBte7AbSWxl6CcvYNJ+3yEIm5CaL\ntype: Opaque\nEOF\n

      Verify in the logs that only the authorino-production instance adds the resources to the index:

      kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383423.86086,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig.statusupdater\",\"msg\":\"resource status updated\",\"authconfig/status\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383423.8608105,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource reconciled\",\"authconfig\":\"myapp/auth-config-2\"}\n# {\"level\":\"info\",\"ts\":1638383460.3515081,\"logger\":\"authorino.controller-runtime.manager.controller.secret\",\"msg\":\"resource reconciled\",\"secret\":\"myapp/api-key-2\"}\n
      "},{"location":"authorino/docs/user-guides/sharding/#remove-a-resource-from-scope","title":"\u277a Remove a resource from scope","text":"
      kubectl -n myapp label authconfig/auth-config-2 disabled=true\n# authconfig.authorino.kuadrant.io/auth-config-2 labeled\n

      Verify in the logs that the authorino-production instance removes the authconfig from the index:

      kubectl logs $(kubectl get pods -l authorino-resource=authorino-production -o name)\n# {\"level\":\"info\",\"ts\":1638383515.6428752,\"logger\":\"authorino.controller-runtime.manager.controller.authconfig\",\"msg\":\"resource de-indexed\",\"authconfig\":\"myapp/auth-config-2\"}\n
      "},{"location":"authorino/docs/user-guides/sharding/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete authorino/authorino-staging\nkubectl delete authorino/authorino-production\nkubectl delete namespace myapp\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      2. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/token-normalization/","title":"User guide: Token normalization","text":"

      Broadly, the term token normalization in authentication systems usually implies the exchange of an authentication token, as provided by the user in a given format, and/or its associated identity claims, for another freshly issued token/set of claims, of a given (normalized) structure or format.

      The most typical use-case for token normalization involves accepting tokens issued by multiple trusted sources and of often varied authentication protocols, while ensuring that the eventual different data structures adopted by each of those sources are normalized, thus allowing to simplify policies and authorization checks that depend on those values. In general, however, any modification to the identity claims can be for the purpose of normalization.

      This user guide focuses on the aspect of mutation of the identity claims resolved from an authentication token, to a certain data format and/or by extending them, so that required attributes can thereafter be trusted to be present among the claims, in a desired form. For such, Authorino allows to extend resolved identity objects with custom attributes (custom claims) of either static values or with values fetched from the Authorization JSON.

      For not only normalizing the identity claims for purpose of writing simpler authorization checks and policies, but also getting Authorino to issue a new token in a normalized format, check the Festival Wristband tokens feature.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 Identity extension
      • Identity verification & authentication \u2192 API key
      • Identity verification & authentication \u2192 JWT verification
      • Authorization \u2192 Pattern-matching authorization

      Check out as well the user guides about Authentication with API keys, OpenID Connect Discovery and authentication with JWTs and Simple pattern-matching authorization policies.

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/token-normalization/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)
      • jq, to extract parts of JSON responses

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server by executing the command below. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant and already have your workload cluster configured and sample service application deployed, as well as your Gateway API network resources applied to route traffic to your service, skip straight to step \u277a.

      At step \u277a, instead of creating an AuthConfig custom resource, create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/token-normalization/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources in the default namespace2, with TLS disabled3.

      kubectl apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  listener:\n    tls:\n      enabled: false\n  oidcServer:\n    tls:\n      enabled: false\nEOF\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#deploy-the-talker-api","title":"\u2778 Deploy the Talker API","text":"

      The Talker API is a simple HTTP service that echoes back in the response whatever it gets in the request. We will use it in this guide as the sample service to be protected by Authorino.

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#setup-envoy","title":"\u2779 Setup Envoy","text":"

      The following bundle from the Authorino examples deploys the Envoy proxy and configuration to wire up the Talker API behind the reverse-proxy, with external authorization enabled with the Authorino instance.4

      kubectl apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\n

      The command above creates an Ingress with host name talker-api.127.0.0.1.nip.io. If you are using a local Kubernetes cluster created with Kind, forward requests from your local port 8000 to the Envoy service running inside the cluster:

      kubectl port-forward deployment/envoy 8000:8000 2>&1 >/dev/null &\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#create-an-authconfig","title":"\u277a Create an AuthConfig","text":"

      Create an Authorino AuthConfig custom resource declaring the auth rules to be enforced.

      This example implements a policy that only users bound to the admin role can send DELETE requests.

      The config trusts access tokens issued by a Keycloak realm as well as API keys labeled specifically to a selected group (friends). The roles of the identities handled by Keycloak are managed in Keycloak, as realm roles. Particularly, users john and peter are bound to the member role, while user jane is bound to roles member and admin. As for the users authenticating with API key, they are all bound to the admin role.

      Without normalizing identity claims from these two different sources, the policy would have to handle the differences of data formats with additional ifs-and-elses. Instead, the config here uses the identity.extendedProperties option to ensure a custom roles (Array) claim is always present in the identity object. In the case of Keycloak ID tokens, the value is extracted from the realm_access.roles claim; for API key-resolved objects, the custom claim is set to the static value [\"admin\"].

      Kuadrant users \u2013 Remember to create an AuthPolicy instead of an AuthConfig. For more, see Kuadrant auth.
      kubectl apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: talker-api-protection\nspec:\n  hosts:\n\n  - talker-api.127.0.0.1.nip.io\n  authentication:\n    \"keycloak-kuadrant-realm\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n      overrides:\n        \"roles\":\n          selector: auth.identity.realm_access.roles\n    \"api-key-friends\":\n      apiKey:\n        selector:\n          matchLabels:\n            group: friends\n      credentials:\n        authorizationHeader:\n          prefix: APIKEY\n      defaults:\n        \"roles\":\n          value: [\"admin\"]\n  authorization:\n    \"only-admins-can-delete\":\n      when:\n      - selector: context.request.http.method\n        operator: eq\n        value: DELETE\n      patternMatching:\n        patterns:\n        - selector: auth.identity.roles\n          operator: incl\n          value: admin\nEOF\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#create-an-api-key","title":"\u277b Create an API key","text":"
      kubectl apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: api-key-1\n  labels:\n    authorino.kuadrant.io/managed-by: authorino\n    group: friends\nstringData:\n  api_key: ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\ntype: Opaque\nEOF\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api","title":"\u277c Consume the API","text":""},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-jane-admin","title":"Obtain an access token and consume the API as Jane (admin)","text":"

      Obtain an access token with the Keycloak server for Jane:

      The AuthConfig deployed in the previous step is suitable for validating access tokens requested inside the cluster. This is because Keycloak's iss claim added to the JWTs always matches the host used to request the token, and Authorino will later try to match this host to the host that provides the OpenID Connect configuration.

      Obtain an access token from within the cluster for the user Jane, whose e-mail has been verified:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=jane' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      If your Keycloak server is reachable from outside the cluster, feel free to obtain the token directly. Make sure the host name set in the OIDC issuer endpoint in the AuthConfig matches the one used to obtain the token and is as well reachable from within the cluster.

      Consume the API as Jane:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#obtain-an-access-token-and-consume-the-api-as-john-member","title":"Obtain an access token and consume the API as John (member)","text":"

      Obtain an access token with the Keycloak server for John:

      ACCESS_TOKEN=$(kubectl run token --attach --rm --restart=Never -q --image=curlimages/curl -- http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant/protocol/openid-connect/token -s -d 'grant_type=password' -d 'client_id=demo' -d 'username=john' -d 'password=p' -d 'scope=openid' | jq -r .access_token)\n

      Consume the API as John:

      curl -H \"Authorization: Bearer $ACCESS_TOKEN\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 403 Forbidden\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#consume-the-api-using-the-api-key-to-authenticate-admin","title":"Consume the API using the API key to authenticate (admin)","text":"
      curl -H \"Authorization: APIKEY ndyBzreUzF4zqDQsqSPMHkRhriEOtcRx\" -X DELETE http://talker-api.127.0.0.1.nip.io:8000/hello -i\n# HTTP/1.1 200 OK\n
      "},{"location":"authorino/docs/user-guides/token-normalization/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete secret/api-key-1\nkubectl delete authconfig/talker-api-protection\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/envoy/envoy-notls-deploy.yaml\nkubectl delete -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/talker-api/talker-api-deploy.yaml\nkubectl delete authorino/authorino\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. namespaced reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      4. For details and instructions to set up Envoy manually, see Protect a service > Setup Envoy in the Getting Started page. If you are running your ingress gateway in Kubernetes and want to avoid setting up and configuring your proxy manually, check out Kuadrant.\u00a0\u21a9

      "},{"location":"authorino/docs/user-guides/validating-webhook/","title":"User guide: Using Authorino as ValidatingWebhook service","text":"

      Authorino provides an interface for raw HTTP external authorization requests. This interface can be used for integrations other than the typical Envoy gRPC protocol, such as (though not limited to) using Authorino as a generic Kubernetes ValidatingWebhook service.

      The rules to validate a request to the Kubernetes API \u2013 typically a POST, PUT or DELETE request targeting a particular Kubernetes resource or collection \u2013, according to which either the change will be deemed accepted or not, are written in an Authorino AuthConfig custom resource. Authentication and authorization are performed by the Kubernetes API server as usual, with auth features of Authorino implementing the additional validation within the scope of an AdmissionReview request.

      This user guide provides an example of using Authorino as a Kubernetes ValidatingWebhook service that validates requests to CREATE and UPDATE Authorino AuthConfig resources. In other words, we will use Authorino as a validator inside the cluster that decides what is a valid AuthConfig for any application which wants to rely on Authorino to protect itself.

      Authorino capabilities featured in this guide:
      • Identity verification & authentication \u2192 Plain
      • Identity verification & authentication \u2192 Kubernetes TokenReview
      • Identity verification & authentication \u2192 API key
      • External auth metadata \u2192 HTTP GET/GET-by-POST
      • Authorization \u2192 Kubernetes SubjectAccessReview
      • Authorization \u2192 Open Policy Agent (OPA) Rego policies
      • Dynamic response \u2192 Festival Wristband tokens
      • Common feature \u2192 Conditions
      • Common feature \u2192 Priorities

      For further details about Authorino features in general, check the docs.

      "},{"location":"authorino/docs/user-guides/validating-webhook/#requirements","title":"Requirements","text":"
      • Kubernetes server with permissions to install cluster-scoped resources (operator, CRDs and RBAC)
      • Identity Provider (IdP) that implements OpenID Connect authentication and OpenID Connect Discovery (e.g. Keycloak)

      If you do not own a Kubernetes server already and just want to try out the steps in this guide, you can create a local containerized cluster by executing the command below. In this case, the main requirement is having Kind installed, with either Docker or Podman.

      kind create cluster --name authorino-tutorial\n

      Deploy the identity provider and authentication server. For the examples in this guide, we are going to use a Keycloak server preloaded with all required realm settings.

      The Keycloak server is only needed for trying out validating AuthConfig resources that use the authentication server.

      kubectl create namespace keycloak\nkubectl -n keycloak apply -f https://raw.githubusercontent.com/kuadrant/authorino-examples/main/keycloak/keycloak-deploy.yaml\n

      The next steps walk you through installing Authorino, deploying and configuring a sample service called Talker API to be protected by the authorization service.

      Using Kuadrant

      If you are a user of Kuadrant you may already have Authorino installed and running. In this case, skip straight to step \u2778.

      At step \u277a, alternatively to creating an AuthConfig custom resource, you may create a Kuadrant AuthPolicy one. The schema of the AuthConfig's spec matches the one of the AuthPolicy's, except spec.host, which is not available in the Kuadrant AuthPolicy. Host names in a Kuadrant AuthPolicy are inferred automatically from the Kubernetes network object referred in spec.targetRef and route selectors declared in the policy.

      For more about using Kuadrant to enforce authorization, check out Kuadrant auth.

      "},{"location":"authorino/docs/user-guides/validating-webhook/#install-the-authorino-operator-cluster-admin-required","title":"\u2776 Install the Authorino Operator (cluster admin required)","text":"

      The following command will install the Authorino Operator in the Kubernetes cluster. The operator manages instances of the Authorino authorization service.

      curl -sL https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/utils/install.sh | bash -s\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#deploy-authorino","title":"\u2777 Deploy Authorino","text":"

      Create the namespace:

      kubectl create namespace authorino\n

      Create the TLS certificates:

      curl -sSL https://raw.githubusercontent.com/Kuadrant/authorino/main/deploy/certs.yaml | sed \"s/\\$(AUTHORINO_INSTANCE)/authorino/g;s/\\$(NAMESPACE)/authorino/g\" | kubectl -n authorino apply -f -\n

      Create the Authorino instance:

      The following command will request an instance of Authorino as a separate service1 that watches for AuthConfig resources cluster-wide2, with TLS enabled3.

      kubectl -n authorino apply -f -<<EOF\napiVersion: operator.authorino.kuadrant.io/v1beta1\nkind: Authorino\nmetadata:\n  name: authorino\nspec:\n  clusterWide: true\n  listener:\n    ports:\n      grpc: 50051\n      http: 5001 # for admissionreview requests sent by the kubernetes api server\n    tls:\n      certSecretRef:\n        name: authorino-server-cert\n  oidcServer:\n    tls:\n      certSecretRef:\n        name: authorino-oidc-server-cert\nEOF\n

      For convenience, the same instance of Authorino pointed as the validating webhook will also be targeted for the sample AuthConfigs created to test the validation. For using different instances of Authorino for the validating webhook and for protecting applications behind a proxy, check out the section about sharding in the docs. There is also a user guide on the topic, with concrete examples.

      "},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-authconfig-and-related-clusterrole","title":"\u2778 Create the AuthConfig and related ClusterRole","text":"

      Create the AuthConfig with the auth rules to validate other AuthConfig resources applied to the cluster.

      The AuthConfig to validate other AuthConfigs will enforce the following rules:

      • Authorino features that cannot be used by any application in their security schemes:
      • Anonymous Access
      • Plain identity object extracted from context
      • Kubernetes authentication (TokenReview)
      • Kubernetes authorization (SubjectAccessReview)
      • Festival Wristband tokens
      • Authorino features that require a RoleBinding to a specific ClusterRole in the 'authorino' namespace, to be used in an AuthConfig:
      • Authorino API key authentication
      • All metadata pulled from external sources must be cached for precisely 5 minutes (300 seconds)
      kubectl -n authorino apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: authconfig-validator\nspec:\n  # admissionreview requests will be sent to this host name\n  hosts:\n\n  - authorino-authorino-authorization.authorino.svc\n\n  # because we're using a single authorino instance for the validating webhook and to protect the user applications,\n  # skip operations related to this one authconfig in the 'authorino' namespace\n  when:\n\n  - selector: context.request.http.body.@fromstr|request.object.metadata.namespace\n    operator: neq\n    value: authorino\n\n  # kubernetes admissionreviews carry info about the authenticated user\n  authentication:\n    \"k8s-userinfo\":\n      plain:\n        selector: context.request.http.body.@fromstr|request.userInfo\n\n  authorization:\n    \"features\":\n      opa:\n        rego: |\n          authconfig = json.unmarshal(input.context.request.http.body).request.object\n\n          forbidden { count(object.get(authconfig.spec, \"authentication\", [])) == 0 }\n          forbidden { authconfig.spec.authentication[_].anonymous }\n          forbidden { authconfig.spec.authentication[_].kubernetesTokenReview }\n          forbidden { authconfig.spec.authentication[_].plain }\n          forbidden { authconfig.spec.authorization[_].kubernetesSubjectAccessReview }\n          forbidden { authconfig.spec.response.success.headers[_].wristband }\n\n          apiKey { authconfig.spec.authentication[_].apiKey }\n\n          allow { count(authconfig.spec.authentication) > 0; not forbidden }\n        allValues: true\n\n    \"apikey-authn-requires-k8s-role-binding\":\n      priority: 1\n      when:\n\n      - selector: auth.authorization.features.apiKey\n        operator: eq\n        value: \"true\"\n      kubernetesSubjectAccessReview:\n        user:\n          selector: auth.identity.username\n        resourceAttributes:\n          namespace: { value: authorino }\n          group: { value: 
authorino.kuadrant.io }\n          resource: { value: authconfigs-with-apikeys }\n          verb: { value: create }\n\n    \"metadata-cache-ttl\":\n      priority: 1\n      opa:\n        rego: |\n          invalid_ttl = input.auth.authorization.features.authconfig.spec.metadata[_].cache.ttl != 300\n          allow { not invalid_ttl }\nEOF\n

      Define a ClusterRole to control the usage of protected features of Authorino:

      kubectl apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRole\nmetadata:\n  name: authorino-apikey\nrules:\n\n- apiGroups: [\"authorino.kuadrant.io\"]\n  resources: [\"authconfigs-with-apikeys\"] # not a real k8s resource\n  verbs: [\"create\"]\nEOF\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#create-the-validatingwebhookconfiguration","title":"\u2779 Create the ValidatingWebhookConfiguration","text":"
      kubectl -n authorino apply -f -<<EOF\napiVersion: admissionregistration.k8s.io/v1\nkind: ValidatingWebhookConfiguration\nmetadata:\n  name: authconfig-authz\n  annotations:\n    cert-manager.io/inject-ca-from: authorino/authorino-ca-cert\nwebhooks:\n\n- name: check-authconfig.authorino.kuadrant.io\n  clientConfig:\n    service:\n      namespace: authorino\n      name: authorino-authorino-authorization\n      port: 5001\n      path: /check\n  rules:\n  - apiGroups: [\"authorino.kuadrant.io\"]\n    apiVersions: [\"v1beta2\"]\n    resources: [\"authconfigs\"]\n    operations: [\"CREATE\", \"UPDATE\"]\n    scope: Namespaced\n  sideEffects: None\n  admissionReviewVersions: [\"v1\"]\nEOF\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#try-it-out","title":"\u277a Try it out","text":"

      Create a namespace:

      kubectl create namespace myapp\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#with-a-valid-authconfig","title":"With a valid AuthConfig","text":"Kuadrant users \u2013 For this and other example AuthConfigs below, if you create a Kuadrant AuthPolicy instead, the output of the commands shall differ. The requested AuthPolicy may be initially accepted, but its state will turn ready or not ready depending on whether the corresponding AuthConfig requested by Kuadrant is accepted or rejected, according to the validating webhook rules. Check the state of the resources to confirm. For more, see Kuadrant auth.
      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection created\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#with-forbidden-features","title":"With forbidden features","text":"

      Anonymous access:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":null}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"anonymous-access\":\n      anonymous: {}\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"anonymous-access\\\":{\\\"anonymous\\\":{}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"anonymous-access\":{\"anonymous\":{}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

      Kubernetes TokenReview:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"k8s-tokenreview\":\n      kubernetesTokenReview:\n        audiences: [\"myapp\"]\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"k8s-tokenreview\\\":{\\\"kubernetesTokenReview\\\":{\\\"audiences\\\":[\\\"myapp\\\"]}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"k8s-tokenreview\":{\"kubernetesTokenReview\":{\"audiences\":[\"myapp\"]}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

      Plain identity extracted from context:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"envoy-jwt-authn\":\n      plain:\n        selector: context.metadata_context.filter_metadata.envoy\\.filters\\.http\\.jwt_authn|verified_jwt\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"envoy-jwt-authn\\\":{\\\"plain\\\":{\\\"selector\\\":\\\"context.metadata_context.filter_metadata.envoy\\\\\\\\.filters\\\\\\\\.http\\\\\\\\.jwt_authn|verified_jwt\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"envoy-jwt-authn\":{\"plain\":{\"selector\":\"context.metadata_context.filter_metadata.envoy\\\\.filters\\\\.http\\\\.jwt_authn|verified_jwt\"}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

      Kubernetes SubjectAccessReview:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  authorization:\n    \"k8s-subjectaccessreview\":\n      kubernetesSubjectAccessReview:\n        user:\n          selector: auth.identity.sub\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"authorization\\\":{\\\"k8s-subjectaccessreview\\\":{\\\"kubernetesSubjectAccessReview\\\":{\\\"user\\\":{\\\"selector\\\":\\\"auth.identity.sub\\\"}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authorization\":{\"k8s-subjectaccessreview\":{\"kubernetesSubjectAccessReview\":{\"user\":{\"selector\":\"auth.identity.sub\"}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

      Festival Wristband tokens:

      kubectl -n myapp apply -f -<<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n  name: wristband-signing-key\nstringData:\n  key.pem: |\n    -----BEGIN EC PRIVATE KEY-----\n    MHcCAQEEIDHvuf81gVlWGo0hmXGTAnA/HVxGuH8vOc7/8jewcVvqoAoGCCqGSM49\n    AwEHoUQDQgAETJf5NLVKplSYp95TOfhVPqvxvEibRyjrUZwwtpDuQZxJKDysoGwn\n    cnUvHIu23SgW+Ee9lxSmZGhO4eTdQeKxMA==\n    -----END EC PRIVATE KEY-----\ntype: Opaque\n---\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  response:\n    success:\n      headers:\n        \"wristband\":\n          wristband:\n            issuer: http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\n            signingKeyRefs:\n            - algorithm: ES256\n              name: wristband-signing-key\nEOF\n# secret/wristband-signing-key created\n# Error from server: error when applying patch:\n# 
{\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"response\\\":{\\\"success\\\":{\\\"headers\\\":{\\\"wristband\\\":{\\\"wristband\\\":{\\\"issuer\\\":\\\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\\\",\\\"signingKeyRefs\\\":[{\\\"algorithm\\\":\\\"ES256\\\",\\\"name\\\":\\\"wristband-signing-key\\\"}]}}}}}}}\\n\"}},\"spec\":{\"response\":{\"success\":{\"headers\":{\"wristband\":{\"wristband\":{\"issuer\":\"http://authorino-authorino-oidc.authorino.svc.cluster.local:8083/myapp/myapp-protection/wristband\",\"signingKeyRefs\":[{\"algorithm\":\"ES256\",\"name\":\"wristband-signing-key\"}]}}}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-additional-permissions","title":"With features that require additional permissions","text":"

      Before adding the required permissions:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels: { app: myapp }\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"api-key\\\":{\\\"apiKey\\\":{\\\"selector\\\":{\\\"matchLabels\\\":{\\\"app\\\":\\\"myapp\\\"}}}}},\\\"hosts\\\":[\\\"myapp.io\\\"]}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":{\"apiKey\":{\"selector\":{\"matchLabels\":{\"app\":\"myapp\"}}}},\"keycloak\":null}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Not authorized: unknown reason\n

      Add the required permissions:

      kubectl -n authorino apply -f -<<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: authorino-apikey\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: authorino-apikey\nsubjects:\n\n- kind: User\n  name: kubernetes-admin\nEOF\n# rolebinding.rbac.authorization.k8s.io/authorino-apikey created\n

      After adding the required permissions:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"api-key\":\n      apiKey:\n        selector:\n          matchLabels: { app: myapp }\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#with-features-that-require-specific-property-validation","title":"With features that require specific property validation","text":"

      Invalid:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"external-source\":\n      http:\n        url: http://metadata.io\n      cache:\n        key: { value: global }\n        ttl: 60\nEOF\n# Error from server: error when applying patch:\n# {\"metadata\":{\"annotations\":{\"kubectl.kubernetes.io/last-applied-configuration\":\"{\\\"apiVersion\\\":\\\"authorino.kuadrant.io/v1beta3\\\",\\\"kind\\\":\\\"AuthConfig\\\",\\\"metadata\\\":{\\\"annotations\\\":{},\\\"name\\\":\\\"myapp-protection\\\",\\\"namespace\\\":\\\"myapp\\\"},\\\"spec\\\":{\\\"authentication\\\":{\\\"keycloak\\\":{\\\"jwt\\\":{\\\"issuerUrl\\\":\\\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\\\"}}},\\\"hosts\\\":[\\\"myapp.io\\\"],\\\"metadata\\\":{\\\"external-source\\\":{\\\"cache\\\":{\\\"key\\\":{\\\"value\\\":\\\"global\\\"},\\\"ttl\\\":60},\\\"http\\\":{\\\"url\\\":\\\"http://metadata.io\\\"}}}}}\\n\"}},\"spec\":{\"authentication\":{\"api-key\":null,\"keycloak\":{\"jwt\":{\"issuerUrl\":\"http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\"}}},\"metadata\":{\"external-source\":{\"cache\":{\"key\":{\"value\":\"global\"},\"ttl\":60},\"http\":{\"url\":\"http://metadata.io\"}}}}}\n# to:\n# Resource: \"authorino.kuadrant.io/v1beta3, Resource=authconfigs\", GroupVersionKind: \"authorino.kuadrant.io/v1beta3, Kind=AuthConfig\"\n# Name: \"myapp-protection\", Namespace: \"myapp\"\n# for: \"STDIN\": error when patching \"STDIN\": admission webhook \"check-authconfig.authorino.kuadrant.io\" denied the request: Unauthorized\n

      Valid:

      kubectl -n myapp apply -f -<<EOF\napiVersion: authorino.kuadrant.io/v1beta3\nkind: AuthConfig\nmetadata:\n  name: myapp-protection\nspec:\n  hosts:\n\n  - myapp.io\n  authentication:\n    \"keycloak\":\n      jwt:\n        issuerUrl: http://keycloak.keycloak.svc.cluster.local:8080/realms/kuadrant\n  metadata:\n    \"external-source\":\n      http:\n        url: http://metadata.io\n      cache:\n        key: { value: global }\n        ttl: 300\nEOF\n# authconfig.authorino.kuadrant.io/myapp-protection configured\n
      "},{"location":"authorino/docs/user-guides/validating-webhook/#cleanup","title":"Cleanup","text":"

      If you have started a Kubernetes cluster locally with Kind to try this user guide, delete it by running:

      kind delete cluster --name authorino-tutorial\n

      Otherwise, delete the resources created in each step:

      kubectl delete namespace myapp\nkubectl delete namespace authorino\nkubectl delete clusterrole authorino-apikey\nkubectl delete namespace keycloak\n

      To uninstall the Authorino Operator and manifests (CRDs, RBAC, etc), run:

      kubectl delete -f https://raw.githubusercontent.com/Kuadrant/authorino-operator/main/config/deploy/manifests.yaml\n
      1. In contrast to a dedicated sidecar of the protected service and other architectures. Check out Architecture > Topologies for all options.\u00a0\u21a9

      2. cluster-wide reconciliation mode. See Cluster-wide vs. Namespaced instances.\u00a0\u21a9

      3. For other variants and deployment options, check out Getting Started, as well as the Authorino CRD specification.\u00a0\u21a9

      "},{"location":"architecture/docs/design/architectural-overview-v1/","title":"Kuadrant Architectural Overview","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#overview","title":"Overview","text":"

      Kuadrant provides connectivity, security and service protection capabilities in both a single and multi-cluster environment. It exposes these capabilities in the form of Kubernetes CRDs that implement the Gateway API concept of policy attachment. These policy APIs can target specific Gateway API resources such as Gateways and HTTPRoutes to extend their capabilities and configuration. They enable platform engineers to secure, protect and connect their infrastructure and allow application developers to self service and refine policies to their specific needs in order to protect exposed endpoints.

      "},{"location":"architecture/docs/design/architectural-overview-v1/#key-architectural-areas","title":"Key Architectural Areas","text":"
      • Kuadrant architecture is defined and implemented with both control plane and data plane components.
      • The control plane is where policies are exposed and expressed as Kubernetes APIs and reconciled by a policy controller.
      • The data plane is where Kuadrant's \"policy enforcement\" components exist. These components are configured by the control plane and integrate either directly with the Gateway provider or via external integrations.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#10000m-architecture","title":"10000m Architecture","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#control-plane-components-and-responsibilities","title":"Control Plane Components and Responsibilities","text":"

      The control plane is a set of controllers and operators that are responsible for installation and configuration of other components such as the data plane enforcement components and configuration of the Gateway to enable the data plane components to interact with incoming requests. The control plane also owns and reconciles the policy CRD APIs into more complex and specific configuration objects that the policy enforcement components consume in order to know the rules to apply to incoming requests or the configuration to apply to external integrations such as DNS and ACME providers.

      "},{"location":"architecture/docs/design/architectural-overview-v1/#kuadrant-operator","title":"Kuadrant Operator","text":"
      • Installation and configuration of other control plane components
      • Installation of data plane policy enforcement components via their respective control plane operators
      • Configures the Gateway via WASM plugin and other APIs to leverage the data plane components for auth and rate limiting on incoming requests.
      • Exposes RateLimitPolicy, AuthPolicy, DNSPolicy and TLSPolicy and reconciles these into enforceable configuration for the data plane.
      • Exposes Kuadrant and reconciles this to configure and trigger installation of the required data plane components and other control plane components.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#limitador-operator","title":"Limitador Operator:","text":"
      • Installs and configures the Limitador data plane component based on the Limitador CR. Limits specified in the Limitador CR are mounted via configmap into the Limitador component.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#authorino-operator","title":"Authorino Operator:","text":"
      • Installs and configures the Authorino data plane component based on the Authorino CR.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#cert-manager","title":"Cert-Manager:","text":"
      • Manages TLS certificates for our components and for the Gateways. Consumes Certificate resources created by Kuadrant operator in response to the TLSPolicy.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#dns-operator","title":"DNS Operator","text":"
      • DNS operator consumes DNSRecord resources that are configured via the DNSPolicy API and applies them to the targeted cloud DNS provider. AWS, Azure and Google DNS are our main targets.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#data-plane-components-and-responsibilities","title":"Data Plane Components and Responsibilities","text":"

      The data plane components sit in the request flow and are responsible for enforcing configuration defined by policy and providing service protection capabilities based on configuration managed and created by the control plane.

      "},{"location":"architecture/docs/design/architectural-overview-v1/#limitador","title":"Limitador","text":"
      • Complies with the Envoy rate limiting API to provide rate limiting to the gateway. Consumes limits from a configmap created based on the RateLimitPolicy API.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#authorino","title":"Authorino","text":"
      • Complies with the Envoy external auth API to provide auth integration to the gateway. It provides both Authn and Authz. Consumes AuthConfigs created by the kuadrant operator based on the defined AuthPolicy API.
      "},{"location":"architecture/docs/design/architectural-overview-v1/#wasm-shim","title":"WASM Shim","text":"
      • Uses the Proxy WASM ABI Spec to integrate with Envoy and provide filtering and connectivity to Limitador (for request time enforcement of rate limiting) and Authorino (for request time enforcement of authentication & authorization).
      "},{"location":"architecture/docs/design/architectural-overview-v1/#single-cluster-layout","title":"Single Cluster Layout","text":"

      In a single cluster, you have the Kuadrant control plane and data plane sitting together. It is configured to integrate with Gateways on the same cluster and configure a DNS zone via a DNS provider secret (configured alongside a DNSPolicy). Storage of rate limit counters is possible but not required as they are not being shared.

      "},{"location":"architecture/docs/design/architectural-overview-v1/#multi-cluster","title":"Multi-Cluster","text":"

      In the default multi-cluster setup, each individual cluster has Kuadrant installed. Each of these clusters is unaware of the others; they are effectively operating as single clusters. The multi-cluster aspect is created by sharing access with the DNS zone, using a shared host across the clusters and leveraging shared counter storage. The zone is operated on independently by each DNS operator on both clusters to form a single cohesive record set. More details on this can be found in the following RFC. The rate limit counters can also be shared and used by different clusters in order to provide global rate limiting. This is achieved by connecting each instance of Limitador to a shared data store that uses the Redis protocol.

      Shown above is a multi-cluster multi ingress gateway topology. This might be used to support a geographically distributed system for example. However, it is also possible to leverage overlay networking tools such as Skupper that integrate at the Kubernetes service level to have a single gateway cluster that then integrates with multiple backends (on different clusters or in custom infrastructure).

      "},{"location":"architecture/docs/design/architectural-overview-v1/#observability","title":"Observability","text":"

      The Kuadrant architecture is intended to work with some popular monitoring tools for tracing, metrics and log aggregation. Those tools are:

      • Prometheus for scraping metrics - and optionally Thanos for high availability & federation
      • Loki for log aggregation - via log collectors like vector
      • Tempo for trace collecting
      • Grafana for visualising the above

      Depending on the number of clusters in your configuration, you may decide to have a monitoring system on the same cluster as workloads, or in a separate cluster completely. Below are 2 example architectures based on the single cluster and multi cluster layouts. In the single cluster architecture, the collector components (Prometheus, Vector and Tempo) are in the same cluster as the log aggregation (Loki) and visualisation component (Grafana).

      In the multi cluster architecture, the collectors that scrape metrics or logs (Prometheus & Vector) are deployed alongside the workloads in each cluster. However, as traces are sent to a collector (Tempo) from each component, it can be centralised in a separate cluster. Thanos is used in this architecture so that each Prometheus can federate metrics back to a central location. The log collector (Vector) can forward logs to a central Loki instance. Finally, the visualisation component (Grafana) is centralised as well, with data sources configured for each of the 3 components on the same cluster.

      "},{"location":"architecture/docs/design/architectural-overview-v1/#dependencies","title":"Dependencies","text":""},{"location":"architecture/docs/design/architectural-overview-v1/#istio-or-envoy-gateway","title":"Istio or Envoy Gateway:","text":"
      • Gateway API provider that Kuadrant integrates with via WASM to provide service protection capabilities. Kuadrant configures Envoy Proxy via the Istio/Envoy Gateway control plane in order to enforce the applied policies and register components such as Authorino and Limitador.
      • Used by RateLimitPolicy and AuthPolicy
      "},{"location":"architecture/docs/design/architectural-overview-v1/#gateway-api-required","title":"Gateway API: Required","text":"
      • New standard for Ingress from the Kubernetes community
      • Gateway API is the core API that Kuadrant integrates with.
      "},{"location":"dns-operator/docs/provider/","title":"Configuring a DNS Provider","text":"

      In order to be able to interact with supported DNS providers, Kuadrant needs a credential that it can use.

      "},{"location":"dns-operator/docs/provider/#supported-providers","title":"Supported Providers","text":"

      Kuadrant currently supports the following DNS providers:

      • AWS Route 53 (aws)
      • Google Cloud DNS (gcp)
      • Azure (azure)
      "},{"location":"dns-operator/docs/provider/#aws-route-53-provider","title":"AWS Route 53 Provider","text":"

      Kuadrant expects a Secret with a credential. Below is an example for AWS Route 53. It is important to set the secret type to aws:

      kubectl create secret generic my-aws-credentials \\\n  --namespace=kuadrant-dns-system \\\n  --type=kuadrant.io/aws \\\n  --from-literal=AWS_ACCESS_KEY_ID=XXXX \\\n  --from-literal=AWS_REGION=eu-west-1 \\\n  --from-literal=AWS_SECRET_ACCESS_KEY=XXX\n
      Key Example Value Description AWS_REGION eu-west-1 AWS Region AWS_ACCESS_KEY_ID XXXX AWS Access Key ID (see note on permissions below) AWS_SECRET_ACCESS_KEY XXXX AWS Secret Access Key"},{"location":"dns-operator/docs/provider/#aws-iam-permissions-required","title":"AWS IAM Permissions Required","text":"

      We have tested using the available policy AmazonRoute53FullAccess however it should also be possible to restrict the credential down to a particular zone. More info can be found in the AWS docs:

      https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/access-control-managing-permissions.html

      By default, Kuadrant will list the available zones and find the matching zone based on the listener host in the gateway listener. If it finds more than one matching zone for a given listener host, it will not update any of those zones. When providing a credential you should limit that credential down to just have write access to the zones you want Kuadrant to manage. Below is an example of an AWS policy for doing this:

      {\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Sid\": \"VisualEditor0\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListTagsForResources\",\n                \"route53:GetHealthCheckLastFailureReason\",\n                \"route53:GetHealthCheckStatus\",\n                \"route53:GetChange\",\n                \"route53:GetHostedZone\",\n                \"route53:ChangeResourceRecordSets\",\n                \"route53:ListResourceRecordSets\",\n                \"route53:GetHealthCheck\",\n                \"route53:UpdateHostedZoneComment\",\n                \"route53:UpdateHealthCheck\",\n                \"route53:CreateHealthCheck\",\n                \"route53:DeleteHealthCheck\",\n                \"route53:ListTagsForResource\",\n                \"route53:ListHealthChecks\",\n                \"route53:GetGeoLocation\",\n                \"route53:ListGeoLocations\",\n                \"route53:ListHostedZonesByName\",\n                \"route53:GetHealthCheckCount\"\n            ],\n            \"Resource\": [\n                \"arn:aws:route53:::hostedzone/Z08187901Y93585DDGM6K\",\n                \"arn:aws:route53:::healthcheck/*\",\n                \"arn:aws:route53:::change/*\"\n            ]\n        },\n        {\n            \"Sid\": \"VisualEditor1\",\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"route53:ListHostedZones\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n
      "},{"location":"dns-operator/docs/provider/#google-cloud-dns-provider","title":"Google Cloud DNS Provider","text":"

      Kuadrant expects a Secret with a credential. Below is an example for Google DNS. It is important to set the secret type to gcp:

      kubectl create secret generic my-test-gcp-credentials \\\n  --namespace=kuadrant-dns-system \\\n  --type=kuadrant.io/gcp \\\n  --from-literal=PROJECT_ID=xxx \\\n  --from-file=GOOGLE=$HOME/.config/gcloud/application_default_credentials.json\n
      Env Var Example Value Description GOOGLE {\"client_id\": \"***\",\"client_secret\": \"***\",\"refresh_token\": \"***\",\"type\": \"authorized_user\"} This is the JSON created from either the credential created by the gcloud CLI, or the JSON from the Service account PROJECT_ID my_project_id ID to the Google project"},{"location":"dns-operator/docs/provider/#google-cloud-dns-access-permissions-required","title":"Google Cloud DNS Access permissions required","text":"

      We have tested with the dns.admin role. See for more details:

      https://cloud.google.com/dns/docs/access-control#dns.admin

      "},{"location":"dns-operator/docs/provider/#azure-cloud-dns-provider","title":"Azure Cloud DNS Provider","text":"

      Kuadrant expects a Secret with a credential. Below is an example for Azure. It is important to set the secret type to azure:

      We recommend creating a new service principal for managing DNS. Azure Service Principal Docs

      # Create the service principal\n$ DNS_NEW_SP_NAME=kuadrantDnsPrincipal\n$ DNS_SP=$(az ad sp create-for-rbac --name $DNS_NEW_SP_NAME)\n$ DNS_SP_APP_ID=$(echo $DNS_SP | jq -r '.appId')\n$ DNS_SP_PASSWORD=$(echo $DNS_SP | jq -r '.password')\n
      "},{"location":"dns-operator/docs/provider/#azure-cloud-dns-access-permissions-required","title":"Azure Cloud DNS Access permissions required","text":"

      You will need to grant read and contributor access to the zone(s) you want managed for the service principal you are using.

      1) fetch DNS id used to grant access to the service principal

      DNS_ID=$(az network dns zone show --name example.com \\\n --resource-group ExampleDNSResourceGroup --query \"id\" --output tsv)\n\n# get your resource group id\n\nRESOURCE_GROUP_ID=$(az group show --resource-group ExampleDNSResourceGroup | jq \".id\" -r)\n
      "},{"location":"dns-operator/docs/provider/#provide-reader-access-to-the-resource-group","title":"provide reader access to the resource group","text":"

      $ az role assignment create --role \"Reader\" --assignee $DNS_SP_APP_ID --scope $DNS_ID

      "},{"location":"dns-operator/docs/provider/#provide-contributor-access-to-dns-zone-itself","title":"provide contributor access to DNS Zone itself","text":"

      $ az role assignment create --role \"Contributor\" --assignee $DNS_SP_APP_ID --scope $DNS_ID

      As we are setting up advanced traffic rules for GEO and Weighted responses you will also need to grant traffic manager access:

      az role assignment create --role \"Traffic Manager Contributor\" --assignee $DNS_SP_APP_ID --scope $RESOURCE_GROUP_ID\n
      cat <<-EOF > /local/path/to/azure.json\n{\n  \"tenantId\": \"$(az account show --query tenantId -o tsv)\",\n  \"subscriptionId\": \"$(az account show --query id -o tsv)\",\n  \"resourceGroup\": \"ExampleDNSResourceGroup\",\n  \"aadClientId\": \"$DNS_SP_APP_ID\",\n  \"aadClientSecret\": \"$DNS_SP_PASSWORD\"\n}\nEOF\n

      Finally setup the secret with the credential azure.json file

      kubectl create secret generic my-test-azure-credentials \\\n  --namespace=kuadrant-dns-system \\\n  --type=kuadrant.io/azure \\\n  --from-file=azure.json=/local/path/to/azure.json\n
      "}]} \ No newline at end of file diff --git a/dev/sitemap.xml b/dev/sitemap.xml index 06b151ed..3a05f06f 100644 --- a/dev/sitemap.xml +++ b/dev/sitemap.xml @@ -16,14 +16,6 @@ https://docs.kuadrant.io/dev/install-olm/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/install/install-make/ - 2025-01-24 - https://docs.kuadrant.io/dev/kuadrant-operator/doc/install/mtls-configuration/ 2025-01-24 @@ -44,18 +36,10 @@ https://docs.kuadrant.io/dev/kuadrant-operator/doc/overviews/auth/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/overviews/development/ - 2025-01-24 - https://docs.kuadrant.io/dev/kuadrant-operator/doc/overviews/dns/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/overviews/logging/ - 2025-01-24 - https://docs.kuadrant.io/dev/kuadrant-operator/doc/overviews/rate-limiting/ 2025-01-24 @@ -64,10 +48,6 @@ https://docs.kuadrant.io/dev/kuadrant-operator/doc/overviews/tls/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/proposals/rlp-target-gateway-resource/ - 2025-01-24 - https://docs.kuadrant.io/dev/kuadrant-operator/doc/reference/authpolicy/ 2025-01-24 @@ -96,10 +76,6 @@ https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/dns/basic-dns-configuration/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/dns/dns-excluding-specific-addresses/ - 2025-01-24 - https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/dns/dnshealthchecks/ 2025-01-24 @@ -112,18 +88,10 @@ https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/dns/load-balanced-dns/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/dns/orphan-dns-records/ - 2025-01-24 - https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/full-walkthrough/secure-protect-connect/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/misc/external-api/ - 2025-01-24 - 
https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/ratelimiting/authenticated-rl-for-app-developers/ 2025-01-24 @@ -152,14 +120,6 @@ https://docs.kuadrant.io/dev/kuadrant-operator/doc/user-guides/tls/gateway-tls/ 2025-01-24 - - https://docs.kuadrant.io/dev/kuadrant-operator/examples/alerts/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/authorino/ - 2025-01-24 - https://docs.kuadrant.io/dev/authorino/docs/ 2025-01-24 @@ -168,10 +128,6 @@ https://docs.kuadrant.io/dev/authorino/docs/architecture/ 2025-01-24 - - https://docs.kuadrant.io/dev/authorino/docs/code_of_conduct/ - 2025-01-24 - https://docs.kuadrant.io/dev/authorino/docs/contributing/ 2025-01-24 @@ -308,156 +264,12 @@ https://docs.kuadrant.io/dev/authorino/docs/user-guides/validating-webhook/ 2025-01-24 - - https://docs.kuadrant.io/dev/authorino-operator/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/doc/how-it-works/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/doc/topologies/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/doc/migrations/conditions/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/doc/server/configuration/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/limitador/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/limitador-server/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/limitador-server/kubernetes/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/limitador-server/sandbox/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/limitador-server/sandbox/redis-otel/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador/limitador-server/sandbox/redis-tls/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/doc/custom-image/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/doc/development/ - 2025-01-24 - - - 
https://docs.kuadrant.io/dev/limitador-operator/doc/logging/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/doc/rate-limit-headers/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/doc/resource-requirements/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/doc/storage/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/limitador-operator/doc/tracing/ - 2025-01-24 - https://docs.kuadrant.io/dev/architecture/docs/design/architectural-overview-v1/ 2025-01-24 - - https://docs.kuadrant.io/dev/architecture/docs/design/architectural-overview/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/architecture/docs/design/modular_installation/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/RELEASE/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/development/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/generate-gateway-api-httproute/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/generate-kuadrant-auth-policy/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/generate-kuadrant-rate-limit-policy/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/kuadrantctl-ci-cd/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/openapi-apicurio/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/openapi-kuadrant-extensions/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/kuadrantctl/doc/openapi-openshift-dev-spaces/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/dns-operator/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/dns-operator/docs/RELEASE/ - 2025-01-24 - https://docs.kuadrant.io/dev/dns-operator/docs/provider/ 2025-01-24 - - https://docs.kuadrant.io/dev/dns-operator/docs/RFC/distributed-dns/dnsrecord-lifecycle/ - 2025-01-24 - - - https://docs.kuadrant.io/dev/dns-operator/docs/reference/dnsrecord/ - 2025-01-24 - \ No newline at end of file diff --git 
a/dev/sitemap.xml.gz b/dev/sitemap.xml.gz index 85ad22e44929b7ec8ea639f3ae32bef46dcc48a9..509eae5c4550041025bfecd0b11002b756df6b55 100644 GIT binary patch delta 977 zcmV;?11|jj3fc#d7k`@Fa+^93fbaVh8Q;q!nR8~^PU7D63EBr(!~zOJ%Sa2vdHS=$ zaSriCX4(t0@r(^f0)0sPEBWEabMJ2^jLA`W+}-VOcQ+U;RW3A-yWhWlnyH6v?)7k&4&C;&%UScYpYDBcJ?$)7kQQ@MI79 zDM=nvkxlzzhEa7b=J% z))>Wu;v!N5d1sHxz(lZAa-HjyVP&abgkI&KiQFsV8pYC^W^@(4n$-MbYKjJi!N`5d zK&Z^%fu*oF7#bH)qEB`7+4`1{oHcT$FcP_Bm2ZpSxqp=q+;%$0vk^_2;b^pYwo5)w zP?-`uh+A+dQub&#^*`??LE!Eq04$-zQI|uO-+meWL;^@k8 zgY5Bwq(;cYvGL4oyXbfUBc{KX3s22)TadLDs{w&P;n>qCp50nZn#cxcIy{WdP24V0z%iRG(G# zMSs~KO2}Y6LHC}||1o8a&9wVEWs*6S_)jK6~$0AfL4DoKuX!2S&8$r zlz~^|BO)dmqm|4uFGyavw6RwdBcv8$rAjJ;JK5You?MeC>Le#~669sUo3U((x@)to z7f%;F!E#MlS4hrt{u_#!?yd}&H>cN&;G(IKCK7k`^eZyPrdfbaPghVS$`Mhm2c?Vu=}Q%`-IgAqBqI~JF-CApH7 zU*F-bB})z*AU%i^1G4tbPuzKu!`qKfgTL5dOvK^t^8MBIoN)1x0MHO*&ldB1OX#0Z&V_4ehmeSaMKWq>uR0vobV z#&T$>o8+u$t$zwUUL}khSdpi^Yw!t%(PL7GHeilqHIn5BR&05kaMMlX@Yz|Gf2QSV zU>uFymkb1FMh{sI4hDl5WR!$U9ewd$D@bIGY$=Q}U$YiUe6oIWbLuvtrB>j+!RKC5WxLkNxc4$z!kO26p23+Fco1vg)KYRBdkC{GMYJ59KFoHK6n6`Lo%_gB z*Eb366o1i3?2*ytVgrJFtK%LhSxGW8#sFUIEU6t+SVV$#mN=ni|$`>2%eW0AdVx61RZWx{tkx8XPW!jl!U-h+Zs_< zmEQSV;0Xr&F~Ad*L98D{UrXA$7jO+J>|qEHJb%V~Zk{4-94T|bq)=G-2h-@UY#+^9|6wmM8zr}YNz#b1j8w?wkTz3~>AZrH9;2?u8_Aj*+ z^M4q~f;g7i#`7fG#;{vbl8BCC$y_rVF(RZ$1krW)NmEJy_)IAv#d`ZVO;sfmMby zAE3}GilKM}t^Q(!l!`mE66Y5w1Fy(O>}zt2FQde-T-r1!iV;!|(W%Pd7MmM54u9a) zNuA_mBtc#gye+PJ=5;N$TD8*iSP8g_?Uw0|p0|c_NukbOH*pG*c`QSegmp0n>Xz!9 zc~INYvRRX;#3mSnvJ2DQ%eJb&1od3&$tGoBg5=vw5!@Pz#+reew`LKQA_{S^90Wuh?=C?LQy> zc=+wFhsqT77rX6AIt)0Q>wTG7ne;DDh&*M~0(YoHn+p!Bt`m8i?a1q?AaOj}H@BafsCV)`p GL;wJz+N?JK